m2geval / python / csm_class_bench.jsonl
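Each line of csm_class_bench.jsonl is one standalone JSON record describing a fill-in-the-middle class-completion task over the SesameAILabs/csm repository: the ground-truth code is split into prefix_code, middle_code, and suffix_code under inference_info, supporting files sit under context_code, the model's attempt under inference_content, and a score under editdistance_info. A minimal sketch for iterating over the records (field names are taken from the records below; a local file path is assumed):

```python
import json

# Minimal sketch: each line of the file is an independent JSON object,
# so it can be streamed record by record.
with open("csm_class_bench.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        info = record["inference_info"]
        # fill_type is "CLASS_TYPE" for the records in this file.
        print(record["repo_name"], record["file_name"], info["fill_type"])
```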
{"repo_name": "csm", "file_name": "/csm/generator.py", "inference_info": {"prefix_code": "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\n", "suffix_code": "\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator", "middle_code": "class Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n self._text_tokenizer = load_llama3_tokenizer()\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n self._watermarker = load_watermarker(device=device)\n self.sample_rate = mimi.sample_rate\n self.device = device\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n frame_tokens = []\n frame_masks = []\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n 
return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break \n samples.append(sample)\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n return audio", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n 
watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/models.py", "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n 
audio_vocab_size: int\n audio_num_codebooks: int\n\n\nclass Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n \"\"\"Setup KV caches and return a causal mask.\"\"\"\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n tokens: (batch_size, seq_len, audio_num_codebooks+1)\n tokens_mask: (batch_size, seq_len, audio_num_codebooks+1)\n input_pos: (batch_size, seq_len) positions for each token\n mask: (batch_size, seq_len, max_seq_len\n\n Returns:\n (batch_size, audio_num_codebooks) sampled tokens\n \"\"\"\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n\n # Decoder caches must be reset every frame.\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n\n return curr_sample\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def 
_embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n\n return torch.cat([audio_embeds, text_embeds], dim=-2)\n"], ["/csm/run_csm.py", "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. 
So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\ndef main():\n # Select the best available device, skipping MPS due to float64 limitations\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n\n # Load model\n generator = load_csm_1b(device)\n\n # Prepare prompts\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n\n # Generate conversation\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n\n # Generate each utterance\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n\n # Concatenate all generations\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")\n\nif __name__ == \"__main__\":\n main() "]], "task_instance_info": {"created_time": "2025-08-20 15:06:59", "created_task_model": "DeepSeek-R1", "class_skeleton": "class Generator:\n def _tokenize_text_segment(self, text: \n pass\n def _tokenize_audio(self, audio: \n pass\n def _tokenize_segment(self, segment: \n pass", "code_description": "This code segment functions to: Process multimodal input data by encoding textual and audio components into token sequences, generate new audio content through iterative model predictions, apply digital watermarking to generated output, and transform encoded representations back into audible waveforms."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass Generator:\n def _tokenize_text_segment(self, text: str) -> torch.Tensor:\n tokenizer = load_llama3_tokenizer()\n inputs = tokenizer(text, return_tensors=\"pt\")\n return inputs.input_ids.squeeze(0)\n \n def _tokenize_audio(self, audio: torch.Tensor) -> torch.Tensor:\n transform = 
torchaudio.transforms.MelSpectrogram(\n sample_rate=24000,\n n_fft=1024,\n hop_length=256,\n n_mels=80,\n )\n spec = transform(audio)\n log_spec = torch.log(torch.clamp(spec, min=1e-10))\n return log_spec.transpose(0, 1) # (seq_len, n_mels)\n \n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n text_ids = self._tokenize_text_segment(segment.text)\n audio_features = self._tokenize_audio(segment.audio)\n return text_ids, audio_features\n[TASK_END]\n```", "inference_time": "2025-08-20 15-08-09"}, "editdistance_info": {"edit_distance": 10.087, "calculate_time": "2025-08-20 15:08:09", "true_code_clean": "class Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n self._text_tokenizer = load_llama3_tokenizer()\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n self._watermarker = load_watermarker(device=device)\n self.sample_rate = mimi.sample_rate\n self.device = device\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n frame_tokens = []\n frame_masks = []\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n 
tokens_mask.append(gen_segment_tokens_mask)\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break \n samples.append(sample)\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n return audio", "predict_code_clean": "class Generator:\n def _tokenize_text_segment(self, text: str) -> torch.Tensor:\n tokenizer = load_llama3_tokenizer()\n inputs = tokenizer(text, return_tensors=\"pt\")\n return inputs.input_ids.squeeze(0)\n def _tokenize_audio(self, audio: torch.Tensor) -> torch.Tensor:\n transform = torchaudio.transforms.MelSpectrogram(\n sample_rate=24000,\n n_fft=1024,\n hop_length=256,\n n_mels=80,\n )\n spec = transform(audio)\n log_spec = torch.log(torch.clamp(spec, min=1e-10))\n return log_spec.transpose(0, 1) \n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n text_ids = self._tokenize_text_segment(segment.text)\n audio_features = self._tokenize_audio(segment.audio)\n return text_ids, audio_features"}}
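The editdistance_info block above records an edit_distance of 10.087 between true_code_clean and predict_code_clean. The file does not spell out the exact distance function or normalization it used, so the sketch below assumes a plain character-level Levenshtein distance purely for illustration:

```python
# Hedged sketch of the comparison step summarized in editdistance_info.
# The dataset's exact metric/normalization is not specified here, so a
# standard character-level Levenshtein distance is assumed.
def levenshtein(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, start=1):
        curr = [i]
        for j, cb in enumerate(b, start=1):
            curr.append(min(
                prev[j] + 1,               # deletion
                curr[j - 1] + 1,           # insertion
                prev[j - 1] + (ca != cb),  # substitution
            ))
        prev = curr
    return prev[-1]

# Usage against a parsed record:
# ed = record["editdistance_info"]
# dist = levenshtein(ed["true_code_clean"], ed["predict_code_clean"])
```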
{"repo_name": "csm", "file_name": "/csm/generator.py", "inference_info": {"prefix_code": "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\n", "suffix_code": "\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator", "middle_code": "class Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n self._text_tokenizer = load_llama3_tokenizer()\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n self._watermarker = load_watermarker(device=device)\n self.sample_rate = mimi.sample_rate\n self.device = device\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n frame_tokens = []\n frame_masks = []\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n 
return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break \n samples.append(sample)\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n return audio", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n 
watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/models.py", "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n 
audio_vocab_size: int\n audio_num_codebooks: int\n\n\nclass Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n \"\"\"Setup KV caches and return a causal mask.\"\"\"\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n tokens: (batch_size, seq_len, audio_num_codebooks+1)\n tokens_mask: (batch_size, seq_len, audio_num_codebooks+1)\n input_pos: (batch_size, seq_len) positions for each token\n mask: (batch_size, seq_len, max_seq_len\n\n Returns:\n (batch_size, audio_num_codebooks) sampled tokens\n \"\"\"\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n\n # Decoder caches must be reset every frame.\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n\n return curr_sample\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def 
_embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n\n return torch.cat([audio_embeds, text_embeds], dim=-2)\n"], ["/csm/run_csm.py", "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. 
So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\ndef main():\n # Select the best available device, skipping MPS due to float64 limitations\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n\n # Load model\n generator = load_csm_1b(device)\n\n # Prepare prompts\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n\n # Generate conversation\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n\n # Generate each utterance\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n\n # Concatenate all generations\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")\n\nif __name__ == \"__main__\":\n main() "]], "task_instance_info": {"created_time": "2025-08-20 15:06:59", "created_task_model": "DeepSeek-R1", "class_skeleton": "class Generator:\n def _tokenize_text_segment(self, text: \n pass\n def _tokenize_audio(self, audio: \n pass\n def _tokenize_segment(self, segment: \n pass", "code_description": "This code segment functions to: Process multimodal input data (text and audio) through neural network models to generate watermarked audio outputs based on contextual information and specified generation parameters while managing tokenization, positional encoding, and resource constraints."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass Generator:\n def __init__(self, model):\n self.model = model\n \n def _tokenize_text_segment(self, text: str) -> torch.Tensor:\n tokenizer = load_llama3_tokenizer()\n input_ids = tokenizer.encode(text, return_tensors=\"pt\").squeeze(0)\n return input_ids.to(device=self.model.device)\n \n def 
_tokenize_audio(self, audio: torch.Tensor) -> torch.Tensor:\n return audio.to(device=self.model.device, dtype=self.model.dtype)\n \n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor, int]:\n text_tokens = self._tokenize_text_segment(segment.text)\n audio_tokens = self._tokenize_audio(segment.audio)\n return text_tokens, audio_tokens, segment.speaker\n[TASK_END]\n```", "inference_time": "2025-08-20 15-08-15"}, "editdistance_info": {"edit_distance": 10.087, "calculate_time": "2025-08-20 15:08:15", "true_code_clean": "class Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n self._text_tokenizer = load_llama3_tokenizer()\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n self._watermarker = load_watermarker(device=device)\n self.sample_rate = mimi.sample_rate\n self.device = device\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n frame_tokens = []\n frame_masks = []\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = 
torch.cat(tokens_mask, dim=0).bool().to(self.device)\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break \n samples.append(sample)\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n return audio", "predict_code_clean": "class Generator:\n def __init__(self, model):\n self.model = model\n def _tokenize_text_segment(self, text: str) -> torch.Tensor:\n tokenizer = load_llama3_tokenizer()\n input_ids = tokenizer.encode(text, return_tensors=\"pt\").squeeze(0)\n return input_ids.to(device=self.model.device)\n def _tokenize_audio(self, audio: torch.Tensor) -> torch.Tensor:\n return audio.to(device=self.model.device, dtype=self.model.dtype)\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor, int]:\n text_tokens = self._tokenize_text_segment(segment.text)\n audio_tokens = self._tokenize_audio(segment.audio)\n return text_tokens, audio_tokens, segment.speaker"}}
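Because each record stores the ground truth in the prefix_code / middle_code / suffix_code layout, a candidate completion can be spliced back between the prefix and suffix and syntax-checked before scoring. A minimal sketch, assuming `record` is a dict parsed from one line of this file:

```python
import ast

# Hedged sketch: reassemble the FIM layout used by these records and
# verify the result is at least syntactically valid Python. ast.parse
# only parses; it does not execute or resolve imports.
def reassemble_and_check(record: dict, candidate: str) -> bool:
    info = record["inference_info"]
    source = info["prefix_code"] + candidate + info["suffix_code"]
    try:
        ast.parse(source)
        return True
    except SyntaxError:
        return False
```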
{"repo_name": "csm", "file_name": "/csm/generator.py", "inference_info": {"prefix_code": "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\n", "suffix_code": "\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator", "middle_code": "class Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n self._text_tokenizer = load_llama3_tokenizer()\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n self._watermarker = load_watermarker(device=device)\n self.sample_rate = mimi.sample_rate\n self.device = device\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n frame_tokens = []\n frame_masks = []\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n 
return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break \n samples.append(sample)\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n return audio", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n 
watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/models.py", "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n 
audio_vocab_size: int\n audio_num_codebooks: int\n\n\nclass Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n \"\"\"Setup KV caches and return a causal mask.\"\"\"\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n \"\"\"\n Args:\n tokens: (batch_size, seq_len, audio_num_codebooks+1)\n tokens_mask: (batch_size, seq_len, audio_num_codebooks+1)\n input_pos: (batch_size, seq_len) positions for each token\n mask: (batch_size, seq_len, max_seq_len\n\n Returns:\n (batch_size, audio_num_codebooks) sampled tokens\n \"\"\"\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n\n # Decoder caches must be reset every frame.\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n\n return curr_sample\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def 
_embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n\n return torch.cat([audio_embeds, text_embeds], dim=-2)\n"], ["/csm/run_csm.py", "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. 
So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\ndef main():\n # Select the best available device, skipping MPS due to float64 limitations\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n\n # Load model\n generator = load_csm_1b(device)\n\n # Prepare prompts\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n\n # Generate conversation\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n\n # Generate each utterance\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n\n # Concatenate all generations\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")\n\nif __name__ == \"__main__\":\n main() "]], "task_instance_info": {"created_time": "2025-08-20 15:06:59", "created_task_model": "DeepSeek-R1", "class_skeleton": "class Generator:\n def _tokenize_text_segment(self, text: \n pass\n def _tokenize_audio(self, audio: \n pass\n def _tokenize_segment(self, segment: \n pass", "code_description": "This code segment functions to: Process multimodal inputs for audio generation by integrating text and audio tokenization, manage sequence generation parameters, and apply post-processing transformations including watermark embedding and sample rate adjustment."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass Generator:\n def __init__(self, model):\n self.model = model\n self.tokenizer = load_llama3_tokenizer()\n self.audio_target_length = CSM_1B_GH_WATERMARK[0]\n\n def _tokenize_text_segment(self, text: str) -> torch.Tensor:\n encoded = self.tokenizer(text, return_tensors=\"pt\")\n return encoded.input_ids[0] # Return 1D tensor 
of token IDs\n\n def _tokenize_audio(self, audio: torch.Tensor) -> torch.Tensor:\n # Trim or pad audio to target length\n if audio.shape[0] > self.audio_target_length:\n audio = audio[:self.audio_target_length]\n else:\n padding = self.audio_target_length - audio.shape[0]\n audio = torch.nn.functional.pad(audio, (0, padding))\n \n # Add batch dimension and convert to float32\n return audio.to(torch.float32).unsqueeze(0)\n\n def _tokenize_segment(self, segment: Segment) -> dict:\n # Add speaker information to text\n text_with_speaker = f\"<speaker_{segment.speaker}> {segment.text}\"\n \n return {\n \"input_ids\": self._tokenize_text_segment(text_with_speaker).unsqueeze(0).to(self.model.device),\n \"audio_values\": self._tokenize_audio(segment.audio).to(self.model.device)\n }\n[TASK_END]\n```", "inference_time": "2025-08-20 15-08-19"}, "editdistance_info": {"edit_distance": 11.2174, "calculate_time": "2025-08-20 15:08:19", "true_code_clean": "class Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n self._text_tokenizer = load_llama3_tokenizer()\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n self._watermarker = load_watermarker(device=device)\n self.sample_rate = mimi.sample_rate\n self.device = device\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n frame_tokens = []\n frame_masks = []\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n 
segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n samples = []\n curr_tokens = prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break \n samples.append(sample)\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n return audio", "predict_code_clean": "class Generator:\n def __init__(self, model):\n self.model = model\n self.tokenizer = load_llama3_tokenizer()\n self.audio_target_length = CSM_1B_GH_WATERMARK[0]\n def _tokenize_text_segment(self, text: str) -> torch.Tensor:\n encoded = self.tokenizer(text, return_tensors=\"pt\")\n return encoded.input_ids[0] \n def _tokenize_audio(self, audio: torch.Tensor) -> torch.Tensor:\n if audio.shape[0] > self.audio_target_length:\n audio = audio[:self.audio_target_length]\n else:\n padding = self.audio_target_length - audio.shape[0]\n audio = torch.nn.functional.pad(audio, (0, padding))\n return audio.to(torch.float32).unsqueeze(0)\n def _tokenize_segment(self, segment: Segment) -> dict:\n text_with_speaker = f\"<speaker_{segment.speaker}> {segment.text}\"\n return {\n \"input_ids\": self._tokenize_text_segment(text_with_speaker).unsqueeze(0).to(self.model.device),\n \"audio_values\": self._tokenize_audio(segment.audio).to(self.model.device)\n }"}}
{"repo_name": "csm", "file_name": "/csm/models.py", "inference_info": {"prefix_code": "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n audio_vocab_size: int\n audio_num_codebooks: int\n\n\n", "suffix_code": "\n", "middle_code": "class Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n with 
device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n return curr_sample\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n return torch.cat([audio_embeds, text_embeds], dim=-2)", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/generator.py", "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = 
tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\nclass Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n\n self._text_tokenizer = load_llama3_tokenizer()\n\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n\n self._watermarker = load_watermarker(device=device)\n\n self.sample_rate = mimi.sample_rate\n self.device = device\n\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n\n frame_tokens = []\n frame_masks = []\n\n # (K, T)\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n # add EOS frame\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns:\n (seq_len, 33), (seq_len, 33)\n \"\"\"\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n\n samples = []\n curr_tokens = 
prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break # eos\n\n samples.append(sample)\n\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n\n # This applies an imperceptible watermark to identify audio as AI-generated.\n # Watermarking ensures transparency, dissuades misuse, and enables traceability.\n # Please be a responsible AI citizen and keep the watermarking in place.\n # If using CSM 1B in another application, use your own private key and keep it secret.\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n\n return audio\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator"], ["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = 
False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/run_csm.py", "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. 
So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\ndef main():\n # Select the best available device, skipping MPS due to float64 limitations\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n\n # Load model\n generator = load_csm_1b(device)\n\n # Prepare prompts\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n\n # Generate conversation\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n\n # Generate each utterance\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n\n # Concatenate all generations\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")\n\nif __name__ == \"__main__\":\n main() "]], "task_instance_info": {"created_time": "2025-08-20 15:06:59", "created_task_model": "DeepSeek-R1", "class_skeleton": "class Model(\n def __init__(self, config: \n pass\n def setup_caches(self, max_batch_size: \n pass\n def reset_caches(self):\n pass\n def _embed_audio(self, codebook: \n pass\n def _embed_tokens(self, tokens: \n pass", "code_description": "This code segment functions to: Integrate text and audio processing components through transformer architectures, manage embedding layers for multimodal inputs, and implement a stepwise generation mechanism for producing sequential outputs using cached computations and temperature-controlled sampling."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass Model(nn.Module):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n self.backbone = FLAVORS[config.backbone_flavor]()\n self.decoder = FLAVORS[config.decoder_flavor]()\n self.backbone, 
backbone_embed_dim = _prepare_transformer(self.backbone)\n self.decoder, decoder_embed_dim = _prepare_transformer(self.decoder)\n \n self.text_embed = nn.Embedding(config.text_vocab_size, backbone_embed_dim)\n self.audio_embed = nn.Embedding(\n config.audio_vocab_size * config.audio_num_codebooks, decoder_embed_dim\n )\n\n def setup_caches(self, max_batch_size: int):\n self.backbone.setup_caches(max_batch_size)\n self.decoder.setup_caches(max_batch_size)\n\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n\n def _embed_audio(self, codebook: torch.Tensor) -> torch.Tensor:\n embeddings = []\n for i in range(self.config.audio_num_codebooks):\n offset = i * self.config.audio_vocab_size\n indices = codebook[..., i] + offset\n embeddings.append(self.audio_embed(indices))\n return torch.sum(torch.stack(embeddings), dim=0)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n return self.text_embed(tokens)\n[TASK_END]\n```", "inference_time": "2025-08-20 15-08-31"}, "editdistance_info": {"edit_distance": 17.2449, "calculate_time": "2025-08-20 15:08:31", "true_code_clean": "class Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n self.decoder.reset_caches()\n for i in 
range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n return curr_sample\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n return torch.cat([audio_embeds, text_embeds], dim=-2)", "predict_code_clean": "class Model(nn.Module):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n self.backbone = FLAVORS[config.backbone_flavor]()\n self.decoder = FLAVORS[config.decoder_flavor]()\n self.backbone, backbone_embed_dim = _prepare_transformer(self.backbone)\n self.decoder, decoder_embed_dim = _prepare_transformer(self.decoder)\n self.text_embed = nn.Embedding(config.text_vocab_size, backbone_embed_dim)\n self.audio_embed = nn.Embedding(\n config.audio_vocab_size * config.audio_num_codebooks, decoder_embed_dim\n )\n def setup_caches(self, max_batch_size: int):\n self.backbone.setup_caches(max_batch_size)\n self.decoder.setup_caches(max_batch_size)\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n def _embed_audio(self, codebook: torch.Tensor) -> torch.Tensor:\n embeddings = []\n for i in range(self.config.audio_num_codebooks):\n offset = i * self.config.audio_vocab_size\n indices = codebook[..., i] + offset\n embeddings.append(self.audio_embed(indices))\n return torch.sum(torch.stack(embeddings), dim=0)\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n return self.text_embed(tokens)"}}
{"repo_name": "csm", "file_name": "/csm/models.py", "inference_info": {"prefix_code": "from dataclasses import dataclass\n\nimport torch\nimport torch.nn as nn\nimport torchtune\nfrom huggingface_hub import PyTorchModelHubMixin\nfrom torchtune.models import llama3_2\n\n\ndef llama3_2_1B() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=16,\n num_heads=32,\n num_kv_heads=8,\n embed_dim=2048,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\ndef llama3_2_100M() -> torchtune.modules.transformer.TransformerDecoder:\n return llama3_2.llama3_2(\n vocab_size=128_256,\n num_layers=4,\n num_heads=8,\n num_kv_heads=2,\n embed_dim=1024,\n max_seq_len=2048,\n intermediate_dim=8192,\n attn_dropout=0.0,\n norm_eps=1e-5,\n rope_base=500_000,\n scale_factor=32,\n )\n\n\nFLAVORS = {\n \"llama-1B\": llama3_2_1B,\n \"llama-100M\": llama3_2_100M,\n}\n\n\ndef _prepare_transformer(model):\n embed_dim = model.tok_embeddings.embedding_dim\n model.tok_embeddings = nn.Identity()\n model.output = nn.Identity()\n return model, embed_dim\n\n\ndef _create_causal_mask(seq_len: int, device: torch.device):\n return torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool, device=device))\n\n\ndef _index_causal_mask(mask: torch.Tensor, input_pos: torch.Tensor):\n \"\"\"\n Args:\n mask: (max_seq_len, max_seq_len)\n input_pos: (batch_size, seq_len)\n\n Returns:\n (batch_size, seq_len, max_seq_len)\n \"\"\"\n r = mask[input_pos, :]\n return r\n\n\ndef _multinomial_sample_one_no_sync(probs): # Does multinomial sampling without a cuda synchronization\n q = torch.empty_like(probs).exponential_(1)\n return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)\n\n\ndef sample_topk(logits: torch.Tensor, topk: int, temperature: float):\n logits = logits / temperature\n\n filter_value: float = -float(\"Inf\")\n indices_to_remove = logits < torch.topk(logits, topk)[0][..., -1, None]\n scores_processed = logits.masked_fill(indices_to_remove, filter_value)\n scores_processed = torch.nn.functional.log_softmax(scores_processed, dim=-1)\n probs = torch.nn.functional.softmax(scores_processed, dim=-1)\n\n sample_token = _multinomial_sample_one_no_sync(probs)\n return sample_token\n\n\n@dataclass\nclass ModelArgs:\n backbone_flavor: str\n decoder_flavor: str\n text_vocab_size: int\n audio_vocab_size: int\n audio_num_codebooks: int\n\n\n", "suffix_code": "\n", "middle_code": "class Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n with 
device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n def generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n return curr_sample\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n return torch.cat([audio_embeds, text_embeds], dim=-2)", "code_description": null, "fill_type": "CLASS_TYPE", "language_type": "python", "sub_task_type": null}, "context_code": [["/csm/generator.py", "from dataclasses import dataclass\nfrom typing import List, Tuple\n\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom models import Model\nfrom moshi.models import loaders\nfrom tokenizers.processors import TemplateProcessing\nfrom transformers import AutoTokenizer\nfrom watermarking import CSM_1B_GH_WATERMARK, load_watermarker, watermark\n\n\n@dataclass\nclass Segment:\n speaker: int\n text: str\n # (num_samples,), sample_rate = 24_000\n audio: torch.Tensor\n\n\ndef load_llama3_tokenizer():\n \"\"\"\n https://github.com/huggingface/transformers/issues/22794#issuecomment-2092623992\n \"\"\"\n tokenizer_name = \"meta-llama/Llama-3.2-1B\"\n tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)\n bos = 
tokenizer.bos_token\n eos = tokenizer.eos_token\n tokenizer._tokenizer.post_processor = TemplateProcessing(\n single=f\"{bos}:0 $A:0 {eos}:0\",\n pair=f\"{bos}:0 $A:0 {eos}:0 {bos}:1 $B:1 {eos}:1\",\n special_tokens=[(f\"{bos}\", tokenizer.bos_token_id), (f\"{eos}\", tokenizer.eos_token_id)],\n )\n\n return tokenizer\n\n\nclass Generator:\n def __init__(\n self,\n model: Model,\n ):\n self._model = model\n self._model.setup_caches(1)\n\n self._text_tokenizer = load_llama3_tokenizer()\n\n device = next(model.parameters()).device\n mimi_weight = hf_hub_download(loaders.DEFAULT_REPO, loaders.MIMI_NAME)\n mimi = loaders.get_mimi(mimi_weight, device=device)\n mimi.set_num_codebooks(32)\n self._audio_tokenizer = mimi\n\n self._watermarker = load_watermarker(device=device)\n\n self.sample_rate = mimi.sample_rate\n self.device = device\n\n def _tokenize_text_segment(self, text: str, speaker: int) -> Tuple[torch.Tensor, torch.Tensor]:\n frame_tokens = []\n frame_masks = []\n\n text_tokens = self._text_tokenizer.encode(f\"[{speaker}]{text}\")\n text_frame = torch.zeros(len(text_tokens), 33).long()\n text_frame_mask = torch.zeros(len(text_tokens), 33).bool()\n text_frame[:, -1] = torch.tensor(text_tokens)\n text_frame_mask[:, -1] = True\n\n frame_tokens.append(text_frame.to(self.device))\n frame_masks.append(text_frame_mask.to(self.device))\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_audio(self, audio: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n assert audio.ndim == 1, \"Audio must be single channel\"\n\n frame_tokens = []\n frame_masks = []\n\n # (K, T)\n audio = audio.to(self.device)\n audio_tokens = self._audio_tokenizer.encode(audio.unsqueeze(0).unsqueeze(0))[0]\n # add EOS frame\n eos_frame = torch.zeros(audio_tokens.size(0), 1).to(self.device)\n audio_tokens = torch.cat([audio_tokens, eos_frame], dim=1)\n\n audio_frame = torch.zeros(audio_tokens.size(1), 33).long().to(self.device)\n audio_frame_mask = torch.zeros(audio_tokens.size(1), 33).bool().to(self.device)\n audio_frame[:, :-1] = audio_tokens.transpose(0, 1)\n audio_frame_mask[:, :-1] = True\n\n frame_tokens.append(audio_frame)\n frame_masks.append(audio_frame_mask)\n\n return torch.cat(frame_tokens, dim=0), torch.cat(frame_masks, dim=0)\n\n def _tokenize_segment(self, segment: Segment) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"\n Returns:\n (seq_len, 33), (seq_len, 33)\n \"\"\"\n text_tokens, text_masks = self._tokenize_text_segment(segment.text, segment.speaker)\n audio_tokens, audio_masks = self._tokenize_audio(segment.audio)\n\n return torch.cat([text_tokens, audio_tokens], dim=0), torch.cat([text_masks, audio_masks], dim=0)\n\n @torch.inference_mode()\n def generate(\n self,\n text: str,\n speaker: int,\n context: List[Segment],\n max_audio_length_ms: float = 90_000,\n temperature: float = 0.9,\n topk: int = 50,\n ) -> torch.Tensor:\n self._model.reset_caches()\n\n max_generation_len = int(max_audio_length_ms / 80)\n tokens, tokens_mask = [], []\n for segment in context:\n segment_tokens, segment_tokens_mask = self._tokenize_segment(segment)\n tokens.append(segment_tokens)\n tokens_mask.append(segment_tokens_mask)\n\n gen_segment_tokens, gen_segment_tokens_mask = self._tokenize_text_segment(text, speaker)\n tokens.append(gen_segment_tokens)\n tokens_mask.append(gen_segment_tokens_mask)\n\n prompt_tokens = torch.cat(tokens, dim=0).long().to(self.device)\n prompt_tokens_mask = torch.cat(tokens_mask, dim=0).bool().to(self.device)\n\n samples = []\n curr_tokens = 
prompt_tokens.unsqueeze(0)\n curr_tokens_mask = prompt_tokens_mask.unsqueeze(0)\n curr_pos = torch.arange(0, prompt_tokens.size(0)).unsqueeze(0).long().to(self.device)\n\n max_seq_len = 2048\n max_context_len = max_seq_len - max_generation_len\n if curr_tokens.size(1) >= max_context_len:\n raise ValueError(\n f\"Inputs too long, must be below max_seq_len - max_generation_len: {max_context_len}\"\n )\n\n for _ in range(max_generation_len):\n sample = self._model.generate_frame(curr_tokens, curr_tokens_mask, curr_pos, temperature, topk)\n if torch.all(sample == 0):\n break # eos\n\n samples.append(sample)\n\n curr_tokens = torch.cat([sample, torch.zeros(1, 1).long().to(self.device)], dim=1).unsqueeze(1)\n curr_tokens_mask = torch.cat(\n [torch.ones_like(sample).bool(), torch.zeros(1, 1).bool().to(self.device)], dim=1\n ).unsqueeze(1)\n curr_pos = curr_pos[:, -1:] + 1\n\n audio = self._audio_tokenizer.decode(torch.stack(samples).permute(1, 2, 0)).squeeze(0).squeeze(0)\n\n # This applies an imperceptible watermark to identify audio as AI-generated.\n # Watermarking ensures transparency, dissuades misuse, and enables traceability.\n # Please be a responsible AI citizen and keep the watermarking in place.\n # If using CSM 1B in another application, use your own private key and keep it secret.\n audio, wm_sample_rate = watermark(self._watermarker, audio, self.sample_rate, CSM_1B_GH_WATERMARK)\n audio = torchaudio.functional.resample(audio, orig_freq=wm_sample_rate, new_freq=self.sample_rate)\n\n return audio\n\n\ndef load_csm_1b(device: str = \"cuda\") -> Generator:\n model = Model.from_pretrained(\"sesame/csm-1b\")\n model.to(device=device, dtype=torch.bfloat16)\n\n generator = Generator(model)\n return generator"], ["/csm/watermarking.py", "import argparse\n\nimport silentcipher\nimport torch\nimport torchaudio\n\n# This watermark key is public, it is not secure.\n# If using CSM 1B in another application, use a new private key and keep it secret.\nCSM_1B_GH_WATERMARK = [212, 211, 146, 56, 201]\n\n\ndef cli_check_audio() -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--audio_path\", type=str, required=True)\n args = parser.parse_args()\n\n check_audio_from_file(args.audio_path)\n\n\ndef load_watermarker(device: str = \"cuda\") -> silentcipher.server.Model:\n model = silentcipher.get_model(\n model_type=\"44.1k\",\n device=device,\n )\n return model\n\n\n@torch.inference_mode()\ndef watermark(\n watermarker: silentcipher.server.Model,\n audio_array: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> tuple[torch.Tensor, int]:\n audio_array_44khz = torchaudio.functional.resample(audio_array, orig_freq=sample_rate, new_freq=44100)\n encoded, _ = watermarker.encode_wav(audio_array_44khz, 44100, watermark_key, calc_sdr=False, message_sdr=36)\n\n output_sample_rate = min(44100, sample_rate)\n encoded = torchaudio.functional.resample(encoded, orig_freq=44100, new_freq=output_sample_rate)\n return encoded, output_sample_rate\n\n\n@torch.inference_mode()\ndef verify(\n watermarker: silentcipher.server.Model,\n watermarked_audio: torch.Tensor,\n sample_rate: int,\n watermark_key: list[int],\n) -> bool:\n watermarked_audio_44khz = torchaudio.functional.resample(watermarked_audio, orig_freq=sample_rate, new_freq=44100)\n result = watermarker.decode_wav(watermarked_audio_44khz, 44100, phase_shift_decoding=True)\n\n is_watermarked = result[\"status\"]\n if is_watermarked:\n is_csm_watermarked = result[\"messages\"][0] == watermark_key\n else:\n is_csm_watermarked = 
False\n\n return is_watermarked and is_csm_watermarked\n\n\ndef check_audio_from_file(audio_path: str) -> None:\n watermarker = load_watermarker(device=\"cuda\")\n\n audio_array, sample_rate = load_audio(audio_path)\n is_watermarked = verify(watermarker, audio_array, sample_rate, CSM_1B_GH_WATERMARK)\n\n outcome = \"Watermarked\" if is_watermarked else \"Not watermarked\"\n print(f\"{outcome}: {audio_path}\")\n\n\ndef load_audio(audio_path: str) -> tuple[torch.Tensor, int]:\n audio_array, sample_rate = torchaudio.load(audio_path)\n audio_array = audio_array.mean(dim=0)\n return audio_array, int(sample_rate)\n\n\nif __name__ == \"__main__\":\n cli_check_audio()\n"], ["/csm/run_csm.py", "import os\nimport torch\nimport torchaudio\nfrom huggingface_hub import hf_hub_download\nfrom generator import load_csm_1b, Segment\nfrom dataclasses import dataclass\n\n# Disable Triton compilation\nos.environ[\"NO_TORCH_COMPILE\"] = \"1\"\n\n# Default prompts are available at https://hf.co/sesame/csm-1b\nprompt_filepath_conversational_a = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_a.wav\"\n)\nprompt_filepath_conversational_b = hf_hub_download(\n repo_id=\"sesame/csm-1b\",\n filename=\"prompts/conversational_b.wav\"\n)\n\nSPEAKER_PROMPTS = {\n \"conversational_a\": {\n \"text\": (\n \"like revising for an exam I'd have to try and like keep up the momentum because I'd \"\n \"start really early I'd be like okay I'm gonna start revising now and then like \"\n \"you're revising for ages and then I just like start losing steam I didn't do that \"\n \"for the exam we had recently to be fair that was a more of a last minute scenario \"\n \"but like yeah I'm trying to like yeah I noticed this yesterday that like Mondays I \"\n \"sort of start the day with this not like a panic but like a\"\n ),\n \"audio\": prompt_filepath_conversational_a\n },\n \"conversational_b\": {\n \"text\": (\n \"like a super Mario level. Like it's very like high detail. And like, once you get \"\n \"into the park, it just like, everything looks like a computer game and they have all \"\n \"these, like, you know, if, if there's like a, you know, like in a Mario game, they \"\n \"will have like a question block. And if you like, you know, punch it, a coin will \"\n \"come out. 
So like everyone, when they come into the park, they get like this little \"\n \"bracelet and then you can go punching question blocks around.\"\n ),\n \"audio\": prompt_filepath_conversational_b\n }\n}\n\ndef load_prompt_audio(audio_path: str, target_sample_rate: int) -> torch.Tensor:\n audio_tensor, sample_rate = torchaudio.load(audio_path)\n audio_tensor = audio_tensor.squeeze(0)\n # Resample is lazy so we can always call it\n audio_tensor = torchaudio.functional.resample(\n audio_tensor, orig_freq=sample_rate, new_freq=target_sample_rate\n )\n return audio_tensor\n\ndef prepare_prompt(text: str, speaker: int, audio_path: str, sample_rate: int) -> Segment:\n audio_tensor = load_prompt_audio(audio_path, sample_rate)\n return Segment(text=text, speaker=speaker, audio=audio_tensor)\n\ndef main():\n # Select the best available device, skipping MPS due to float64 limitations\n if torch.cuda.is_available():\n device = \"cuda\"\n else:\n device = \"cpu\"\n print(f\"Using device: {device}\")\n\n # Load model\n generator = load_csm_1b(device)\n\n # Prepare prompts\n prompt_a = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_a\"][\"text\"],\n 0,\n SPEAKER_PROMPTS[\"conversational_a\"][\"audio\"],\n generator.sample_rate\n )\n\n prompt_b = prepare_prompt(\n SPEAKER_PROMPTS[\"conversational_b\"][\"text\"],\n 1,\n SPEAKER_PROMPTS[\"conversational_b\"][\"audio\"],\n generator.sample_rate\n )\n\n # Generate conversation\n conversation = [\n {\"text\": \"Hey how are you doing?\", \"speaker_id\": 0},\n {\"text\": \"Pretty good, pretty good. How about you?\", \"speaker_id\": 1},\n {\"text\": \"I'm great! So happy to be speaking with you today.\", \"speaker_id\": 0},\n {\"text\": \"Me too! This is some cool stuff, isn't it?\", \"speaker_id\": 1}\n ]\n\n # Generate each utterance\n generated_segments = []\n prompt_segments = [prompt_a, prompt_b]\n\n for utterance in conversation:\n print(f\"Generating: {utterance['text']}\")\n audio_tensor = generator.generate(\n text=utterance['text'],\n speaker=utterance['speaker_id'],\n context=prompt_segments + generated_segments,\n max_audio_length_ms=10_000,\n )\n generated_segments.append(Segment(text=utterance['text'], speaker=utterance['speaker_id'], audio=audio_tensor))\n\n # Concatenate all generations\n all_audio = torch.cat([seg.audio for seg in generated_segments], dim=0)\n torchaudio.save(\n \"full_conversation.wav\",\n all_audio.unsqueeze(0).cpu(),\n generator.sample_rate\n )\n print(\"Successfully generated full_conversation.wav\")\n\nif __name__ == \"__main__\":\n main() "]], "task_instance_info": {"created_time": "2025-08-20 15:06:59", "created_task_model": "DeepSeek-R1", "class_skeleton": "class Model(\n def __init__(self, config: \n pass\n def setup_caches(self, max_batch_size: \n pass\n def reset_caches(self):\n pass\n def _embed_audio(self, codebook: \n pass\n def _embed_tokens(self, tokens: \n pass", "code_description": "This code segment functions to: Integrate transformer-based components for sequential data generation by combining text and audio embedding layers, implementing cached attention mechanisms, and performing iterative sampling across multiple codebook dimensions with temperature-controlled top-k selection."}, "inference_content": {"inference_model": "DeepSeek-R1", "inference_result": "```python\n[TASK_BEGIN]\nclass Model(nn.Module):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n \n # Initialize backbone and decoder from flavor configurations\n self.backbone, self.backbone_embed_dim = 
_prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, self.decoder_embed_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n \n # Initialize embeddings\n self.token_embedding = nn.Embedding(config.text_vocab_size, self.backbone_embed_dim)\n self.codebook_embeddings = nn.ModuleList([\n nn.Embedding(config.audio_vocab_size, self.backbone_embed_dim)\n for _ in range(config.audio_num_codebooks)\n ])\n \n # Initialize cache lists\n self.backbone_kv_cache = []\n self.decoder_kv_cache = []\n\n def setup_caches(self, max_batch_size: int, max_seq_len: int, device: torch.device):\n # Create caches for backbone layers\n for layer in self.backbone.layers:\n num_heads = layer.attn.num_heads\n num_kv_heads = layer.attn.num_kv_heads\n head_dim = self.backbone_embed_dim // num_heads\n key_cache = torch.zeros(\n (max_batch_size, max_seq_len, num_kv_heads, head_dim),\n device=device\n )\n value_cache = torch.zeros_like(key_cache)\n self.backbone_kv_cache.append((key_cache, value_cache))\n\n # Create caches for decoder layers\n for layer in self.decoder.layers:\n num_heads = layer.attn.num_heads\n num_kv_heads = layer.attn.num_kv_heads\n head_dim = self.decoder_embed_dim // num_heads\n key_cache = torch.zeros(\n (max_batch_size, max_seq_len, num_kv_heads, head_dim),\n device=device\n )\n value_cache = torch.zeros_like(key_cache)\n self.decoder_kv_cache.append((key_cache, value_cache))\n\n def reset_caches(self):\n self.backbone_kv_cache = []\n self.decoder_kv_cache = []\n\n def _embed_audio(self, codebook: torch.Tensor) -> torch.Tensor:\n # Split codebook into individual codebooks along last dimension\n codebooks = torch.unbind(codebook, dim=-1)\n embeddings = []\n for emb, codes in zip(self.codebook_embeddings, codebooks):\n embeddings.append(emb(codes))\n return sum(embeddings)\n\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n return self.token_embedding(tokens) * (self.backbone_embed_dim ** 0.5)\n[TASK_END]\n```", "inference_time": "2025-08-20 15-08-40"}, "editdistance_info": {"edit_distance": 19.7959, "calculate_time": "2025-08-20 15:08:40", "true_code_clean": "class Model(\n nn.Module,\n PyTorchModelHubMixin,\n repo_url=\"https://github.com/SesameAILabs/csm\",\n pipeline_tag=\"text-to-speech\",\n license=\"apache-2.0\",\n):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n self.backbone, backbone_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, decoder_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n self.text_embeddings = nn.Embedding(config.text_vocab_size, backbone_dim)\n self.audio_embeddings = nn.Embedding(config.audio_vocab_size * config.audio_num_codebooks, backbone_dim)\n self.projection = nn.Linear(backbone_dim, decoder_dim, bias=False)\n self.codebook0_head = nn.Linear(backbone_dim, config.audio_vocab_size, bias=False)\n self.audio_head = nn.Parameter(torch.empty(config.audio_num_codebooks - 1, decoder_dim, config.audio_vocab_size))\n def setup_caches(self, max_batch_size: int) -> torch.Tensor:\n dtype = next(self.parameters()).dtype\n device = next(self.parameters()).device\n with device:\n self.backbone.setup_caches(max_batch_size, dtype)\n self.decoder.setup_caches(max_batch_size, dtype, decoder_max_seq_len=self.config.audio_num_codebooks)\n self.register_buffer(\"backbone_causal_mask\", _create_causal_mask(self.backbone.max_seq_len, device))\n self.register_buffer(\"decoder_causal_mask\", _create_causal_mask(self.config.audio_num_codebooks, device))\n def 
generate_frame(\n self,\n tokens: torch.Tensor,\n tokens_mask: torch.Tensor,\n input_pos: torch.Tensor,\n temperature: float,\n topk: int,\n ) -> torch.Tensor:\n dtype = next(self.parameters()).dtype\n b, s, _ = tokens.size()\n assert self.backbone.caches_are_enabled(), \"backbone caches are not enabled\"\n curr_backbone_mask = _index_causal_mask(self.backbone_causal_mask, input_pos)\n embeds = self._embed_tokens(tokens)\n masked_embeds = embeds * tokens_mask.unsqueeze(-1)\n h = masked_embeds.sum(dim=2)\n h = self.backbone(h, input_pos=input_pos, mask=curr_backbone_mask).to(dtype=dtype)\n last_h = h[:, -1, :]\n c0_logits = self.codebook0_head(last_h)\n c0_sample = sample_topk(c0_logits, topk, temperature)\n c0_embed = self._embed_audio(0, c0_sample)\n curr_h = torch.cat([last_h.unsqueeze(1), c0_embed], dim=1)\n curr_sample = c0_sample.clone()\n curr_pos = torch.arange(0, curr_h.size(1), device=curr_h.device).unsqueeze(0).repeat(curr_h.size(0), 1)\n self.decoder.reset_caches()\n for i in range(1, self.config.audio_num_codebooks):\n curr_decoder_mask = _index_causal_mask(self.decoder_causal_mask, curr_pos)\n decoder_h = self.decoder(self.projection(curr_h), input_pos=curr_pos, mask=curr_decoder_mask).to(\n dtype=dtype\n )\n ci_logits = torch.mm(decoder_h[:, -1, :], self.audio_head[i - 1])\n ci_sample = sample_topk(ci_logits, topk, temperature)\n ci_embed = self._embed_audio(i, ci_sample)\n curr_h = ci_embed\n curr_sample = torch.cat([curr_sample, ci_sample], dim=1)\n curr_pos = curr_pos[:, -1:] + 1\n return curr_sample\n def reset_caches(self):\n self.backbone.reset_caches()\n self.decoder.reset_caches()\n def _embed_audio(self, codebook: int, tokens: torch.Tensor) -> torch.Tensor:\n return self.audio_embeddings(tokens + codebook * self.config.audio_vocab_size)\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n text_embeds = self.text_embeddings(tokens[:, :, -1]).unsqueeze(-2)\n audio_tokens = tokens[:, :, :-1] + (\n self.config.audio_vocab_size * torch.arange(self.config.audio_num_codebooks, device=tokens.device)\n )\n audio_embeds = self.audio_embeddings(audio_tokens.view(-1)).reshape(\n tokens.size(0), tokens.size(1), self.config.audio_num_codebooks, -1\n )\n return torch.cat([audio_embeds, text_embeds], dim=-2)", "predict_code_clean": "class Model(nn.Module):\n def __init__(self, config: ModelArgs):\n super().__init__()\n self.config = config\n self.backbone, self.backbone_embed_dim = _prepare_transformer(FLAVORS[config.backbone_flavor]())\n self.decoder, self.decoder_embed_dim = _prepare_transformer(FLAVORS[config.decoder_flavor]())\n self.token_embedding = nn.Embedding(config.text_vocab_size, self.backbone_embed_dim)\n self.codebook_embeddings = nn.ModuleList([\n nn.Embedding(config.audio_vocab_size, self.backbone_embed_dim)\n for _ in range(config.audio_num_codebooks)\n ])\n self.backbone_kv_cache = []\n self.decoder_kv_cache = []\n def setup_caches(self, max_batch_size: int, max_seq_len: int, device: torch.device):\n for layer in self.backbone.layers:\n num_heads = layer.attn.num_heads\n num_kv_heads = layer.attn.num_kv_heads\n head_dim = self.backbone_embed_dim // num_heads\n key_cache = torch.zeros(\n (max_batch_size, max_seq_len, num_kv_heads, head_dim),\n device=device\n )\n value_cache = torch.zeros_like(key_cache)\n self.backbone_kv_cache.append((key_cache, value_cache))\n for layer in self.decoder.layers:\n num_heads = layer.attn.num_heads\n num_kv_heads = layer.attn.num_kv_heads\n head_dim = self.decoder_embed_dim // num_heads\n key_cache = torch.zeros(\n 
(max_batch_size, max_seq_len, num_kv_heads, head_dim),\n device=device\n )\n value_cache = torch.zeros_like(key_cache)\n self.decoder_kv_cache.append((key_cache, value_cache))\n def reset_caches(self):\n self.backbone_kv_cache = []\n self.decoder_kv_cache = []\n def _embed_audio(self, codebook: torch.Tensor) -> torch.Tensor:\n codebooks = torch.unbind(codebook, dim=-1)\n embeddings = []\n for emb, codes in zip(self.codebook_embeddings, codebooks):\n embeddings.append(emb(codes))\n return sum(embeddings)\n def _embed_tokens(self, tokens: torch.Tensor) -> torch.Tensor:\n return self.token_embedding(tokens) * (self.backbone_embed_dim ** 0.5)"}}
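The record above calls sample_topk(logits, topk, temperature) from Model.generate_frame (once against codebook0_head, then once per remaining codebook against audio_head[i - 1]), but the helper itself appears in none of the file snapshots. The sketch below is a minimal, assumed implementation of the temperature-controlled top-k selection the code_description refers to — temperature scaling, top-k masking, then categorical sampling. It is a plausible reconstruction, not the repository's actual helper.

```python
import torch


def sample_topk(logits: torch.Tensor, topk: int, temperature: float) -> torch.Tensor:
    """Assumed implementation of the undefined `sample_topk` helper.

    Shapes follow the call sites in `generate_frame`: `logits` is
    (batch, vocab) and the return value is (batch, 1) token ids.
    """
    # Temperature scaling: higher temperature flattens the distribution.
    scaled = logits / max(temperature, 1e-5)

    # Top-k masking: everything below the k-th largest logit goes to -inf.
    kth_largest = torch.topk(scaled, topk, dim=-1).values[..., -1, None]
    masked = torch.where(scaled < kth_largest, torch.full_like(scaled, float("-inf")), scaled)

    # Sample one token id per row from the renormalized distribution.
    probs = torch.softmax(masked, dim=-1)
    return torch.multinomial(probs, num_samples=1)


if __name__ == "__main__":
    logits = torch.randn(2, 4096)  # (batch, vocab) dummy logits; vocab size is arbitrary here
    ids = sample_topk(logits, topk=50, temperature=0.9)  # generate()'s default topk/temperature
    print(ids.shape)  # torch.Size([2, 1])
```

Under this assumption, generate_frame's torch.cat([curr_sample, ci_sample], dim=1) accumulates one sampled id per codebook into a (batch, audio_num_codebooks) frame, which is what Generator.generate collects into samples.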
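Those per-frame samples then feed the decode step at the end of Generator.generate: torch.stack(samples).permute(1, 2, 0) rearranges T frames of shape (1, 32) into the (batch, codebooks, frames) layout the Mimi decoder consumes, mirroring the (codebooks, frames) layout that _tokenize_audio gets back from encode. A shape walkthrough, assuming a hypothetical T = 5 generated frames:

```python
import torch

# Assumes batch size 1 (setup_caches(1)), 32 codebooks (set_num_codebooks(32)),
# and a hypothetical T = 5 generated frames.
T = 5
samples = [torch.zeros(1, 32, dtype=torch.long) for _ in range(T)]  # one (1, 32) frame per step

stacked = torch.stack(samples)    # (T, 1, 32): frames, batch, codebooks
codes = stacked.permute(1, 2, 0)  # (1, 32, T): batch, codebooks, frames
print(codes.shape)                # torch.Size([1, 32, 5])
```

The loop length itself is bounded by int(max_audio_length_ms / 80), i.e. one frame per 80 ms (consistent with Mimi's 12.5 Hz frame rate), so the 90_000 ms default allows at most 1125 frames; generation stops early when a frame of all zeros — the EOS convention established by the zero frame appended in _tokenize_audio — is sampled.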