text
stringlengths
5
631k
id
stringlengths
14
178
metadata
dict
__index_level_0__
int64
0
647
# DPO pipeline for the creation of StackLlaMa 2: a Stack exchange llama-v2-7b model ## Prerequisites Install all the dependencies in the `requirements.txt`: ``` $ pip install -U -r requirements.txt ``` Since we will use `accelerate` for training, make sure to run: ``` $ accelerate config ``` ## Training There were two main steps to the DPO training process: 1. Supervised fine-tuning of the base llama-v2-7b model to create llama-v2-7b-se: ``` accelerate launch examples/research_projects/stack_llama_2/scripts/sft_llama2.py \ --output_dir="./sft" \ --max_steps=500 \ --save_steps=10 \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=1 \ --gradient_accumulation_steps=2 \ --gradient_checkpointing=False \ --group_by_length=False \ --learning_rate=1e-4 \ --lr_scheduler_type="cosine" \ --warmup_steps=100 \ --weight_decay=0.05 \ --optim="paged_adamw_32bit" \ --bf16=True \ --remove_unused_columns=False \ --run_name="sft_llama2" \ --report_to="wandb" ``` 1. Run the DPO trainer using the model saved by the previous step: ``` accelerate launch examples/research_projects/stack_llama_2/scripts/dpo_llama2.py \ --model_name_or_path="sft/final_checkpoint" \ --output_dir="dpo" ``` ## Merging the adaptors To merge the adaptors into the base model we can use the `merge_peft_adapter.py` helper script that comes with TRL: ``` python examples/research_projects/stack_llama/scripts/merge_peft_adapter.py --base_model_name="meta-llama/Llama-2-7b-hf" --adapter_model_name="dpo/final_checkpoint/" --output_name="stack-llama-2" ``` which will also push the model to your HuggingFace hub account. ## Running the model We can load the DPO-trained LoRA adaptors which were saved by the DPO training step and load them via: ```py from peft import AutoPeftModelForCausalLM model = AutoPeftModelForCausalLM.from_pretrained( "dpo/final_checkpoint", low_cpu_mem_usage=True, torch_dtype=torch.float16, load_in_4bit=True, ) model.generate(...) ```
trl/examples/research_projects/stack_llama_2/scripts/README.md/0
{ "file_path": "trl/examples/research_projects/stack_llama_2/scripts/README.md", "repo_id": "trl", "token_count": 880 }
551
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "trl @ git+https://github.com/huggingface/trl.git", # "peft", # "wandb", # "qwen-vl-utils", # ] # /// """ Example usage: accelerate launch \ --config_file=deepspeed_zero2.yaml \ sft_video_llm.py \ --dataset_name=mfarre/simplevideoshorts \ --video_cache_dir="/optional/path/to/cache/" \ --model_name_or_path=Qwen/Qwen2-VL-7B-Instruct \ --per_device_train_batch_size=1 \ --output_dir=video-llm-output \ --tf32=True \ --gradient_accumulation_steps=4 \ --num_train_epochs=4 \ --optim="adamw_torch_fused" \ --log_level="debug" \ --log_level_replica="debug" \ --save_strategy="steps" \ --save_steps=300 \ --learning_rate=8e-5 \ --max_grad_norm=0.3 \ --warmup_ratio=0.1 \ --lr_scheduler_type="cosine" \ --report_to="wandb" \ --push_to_hub=False \ --torch_dtype=bfloat16 \ --gradient_checkpointing=True """ import json import os import random from dataclasses import dataclass, field from typing import Any import requests import torch import wandb from datasets import load_dataset from peft import LoraConfig from qwen_vl_utils import process_vision_info from transformers import AutoModelForImageTextToText, AutoProcessor, BitsAndBytesConfig, Qwen2VLProcessor from trl import ModelConfig, ScriptArguments, SFTConfig, SFTTrainer, TrlParser, get_kbit_device_map def download_video(url: str, cache_dir: str) -> str: """Download video if not already present locally.""" os.makedirs(cache_dir, exist_ok=True) # Create cache dir if it doesn't exist filename = url.split("/")[-1] local_path = os.path.join(cache_dir, filename) if os.path.exists(local_path): return local_path try: with requests.get(url, stream=True) as r: r.raise_for_status() with open(local_path, "wb") as f: for chunk in r.iter_content(chunk_size=8192): if chunk: f.write(chunk) return local_path except requests.RequestException as e: raise Exception(f"Failed to download video: {e}") from e def prepare_dataset(example: dict[str, Any], cache_dir: str) -> dict[str, list[dict[str, Any]]]: """Prepare dataset example for training.""" video_url = example["video_url"] timecoded_cc = example["timecoded_cc"] qa_pairs = json.loads(example["qa"]) system_message = "You are an expert in movie narrative analysis." base_prompt = f"""Analyze the video and consider the following timecoded subtitles: {timecoded_cc} Based on this information, please answer the following questions:""" selected_qa = random.sample(qa_pairs, 1)[0] messages = [ {"role": "system", "content": [{"type": "text", "text": system_message}]}, { "role": "user", "content": [ {"type": "video", "video": download_video(video_url, cache_dir), "max_pixels": 360 * 420, "fps": 1.0}, {"type": "text", "text": f"{base_prompt}\n\nQuestion: {selected_qa['question']}"}, ], }, {"role": "assistant", "content": [{"type": "text", "text": selected_qa["answer"]}]}, ] return {"messages": messages} def collate_fn(examples: list[dict[str, Any]]) -> dict[str, torch.Tensor]: """Collate batch of examples for training.""" texts = [] video_inputs = [] for i, example in enumerate(examples): try: video_path = next( content["video"] for message in example["messages"] for content in message["content"] if content.get("type") == "video" ) print(f"Processing video: {os.path.basename(video_path)}") texts.append(processor.apply_chat_template(example["messages"], tokenize=False)) video_input = process_vision_info(example["messages"])[1][0] video_inputs.append(video_input) except Exception as e: raise ValueError(f"Failed to process example {i}: {e}") from e inputs = processor(text=texts, videos=video_inputs, return_tensors="pt", padding=True) labels = inputs["input_ids"].clone() labels[labels == processor.tokenizer.pad_token_id] = -100 # Handle visual tokens based on processor type visual_tokens = ( [151652, 151653, 151656] if isinstance(processor, Qwen2VLProcessor) else [processor.tokenizer.convert_tokens_to_ids(processor.image_token)] ) for visual_token_id in visual_tokens: labels[labels == visual_token_id] = -100 inputs["labels"] = labels return inputs @dataclass class CustomScriptArguments(ScriptArguments): r""" Arguments for the script. Args: video_cache_dir (`str`, *optional*, defaults to `"/tmp/videos/"`): Video cache directory. """ video_cache_dir: str = field(default="/tmp/videos/", metadata={"help": "Video cache directory."}) if __name__ == "__main__": # Parse arguments parser = TrlParser((CustomScriptArguments, SFTConfig, ModelConfig)) script_args, training_args, model_args = parser.parse_args_and_config() # Configure training args training_args.gradient_checkpointing_kwargs = dict(use_reentrant=False) training_args.remove_unused_columns = False # Load dataset dataset = load_dataset(script_args.dataset_name, name=script_args.dataset_config, split="train") # Setup model torch_dtype = ( model_args.torch_dtype if model_args.torch_dtype in ["auto", None] else getattr(torch, model_args.torch_dtype) ) # Quantization configuration for 4-bit training bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16, ) # Model initialization model_kwargs = dict( revision=model_args.model_revision, trust_remote_code=model_args.trust_remote_code, torch_dtype=torch_dtype, device_map=get_kbit_device_map(), quantization_config=bnb_config, ) model = AutoModelForImageTextToText.from_pretrained(model_args.model_name_or_path, **model_kwargs) peft_config = LoraConfig( task_type="CAUSAL_LM", r=16, lora_alpha=16, lora_dropout=0.1, bias="none", target_modules=["q_proj", "k_proj", "v_proj", "o_proj"], ) # Configure model modules for gradients if training_args.gradient_checkpointing: model.gradient_checkpointing_enable() model.config.use_reentrant = False model.enable_input_require_grads() processor = AutoProcessor.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) # Prepare dataset prepared_dataset = [prepare_dataset(example, script_args.video_cache_dir) for example in dataset] # Initialize wandb if specified if training_args.report_to == "wandb": wandb.init(project="video-llm-training") # Initialize trainer trainer = SFTTrainer( model=model, args=training_args, train_dataset=prepared_dataset, data_collator=collate_fn, peft_config=peft_config, processing_class=processor, ) # Train model trainer.train() # Save final model trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name) if trainer.accelerator.is_main_process: processor.push_to_hub(training_args.hub_model_id) # Cleanup del model del trainer torch.cuda.empty_cache() wandb.finish()
trl/examples/scripts/sft_video_llm.py/0
{ "file_path": "trl/examples/scripts/sft_video_llm.py", "repo_id": "trl", "token_count": 3427 }
552
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from trl.core import masked_mean, masked_var, masked_whiten from .testing_utils import TrlTestCase class CoreTester(TrlTestCase): """ A wrapper class for testing core utils functions """ def setUp(self): super().setUp() self.test_input = torch.Tensor([1, 2, 3, 4]) self.test_mask = torch.Tensor([0, 1, 1, 0]) self.test_input_unmasked = self.test_input[1:3] def test_masked_mean(self): self.assertEqual(torch.mean(self.test_input_unmasked), masked_mean(self.test_input, self.test_mask)) def test_masked_var(self): self.assertEqual(torch.var(self.test_input_unmasked), masked_var(self.test_input, self.test_mask)) def test_masked_whiten(self): def whiten(values: torch.Tensor) -> torch.Tensor: mean, var = torch.mean(values), torch.var(values) return (values - mean) * torch.rsqrt(var + 1e-8) whiten_unmasked = whiten(self.test_input_unmasked) whiten_masked = masked_whiten(self.test_input, self.test_mask)[1:3] diffs = (whiten_unmasked - whiten_masked).sum() self.assertLess(abs(diffs.item()), 0.00001)
trl/tests/test_core.py/0
{ "file_path": "trl/tests/test_core.py", "repo_id": "trl", "token_count": 673 }
553
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch from transformers import AutoModelForCausalLM from transformers.testing_utils import ( require_peft, require_torch_gpu_if_bnb_not_multi_backend_enabled, ) from transformers.utils import is_peft_available from trl import AutoModelForCausalLMWithValueHead from .testing_utils import TrlTestCase if is_peft_available(): from peft import LoraConfig, get_peft_model @require_peft class PeftModelTester(TrlTestCase): def setUp(self): super().setUp() self.causal_lm_model_id = "trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" self.lora_config = LoraConfig( r=16, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) def test_create_peft_model(self): r""" Simply creates a peft model and checks that it can be loaded. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) _ = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) def test_peft_requires_grad(self): r""" Check that the value head of the returned model has requires_grad=True. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) # Check that the value head has requires_grad=True self.assertTrue(model.v_head.summary.weight.requires_grad) def test_check_peft_model_nb_trainable_params(self): r""" Check that the number of trainable parameters is correct. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) # Check that the number of trainable param for the non-peft model is correct non_peft_model = AutoModelForCausalLMWithValueHead.from_pretrained(self.causal_lm_model_id) nb_trainable_params = sum(p.numel() for p in non_peft_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 2428641) def test_create_peft_model_from_config(self): r""" Simply creates a peft model and checks that it can be loaded. """ trl_model = AutoModelForCausalLMWithValueHead.from_pretrained( self.causal_lm_model_id, peft_config=self.lora_config ) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(causal_lm_model, peft_config=self.lora_config) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) @require_torch_gpu_if_bnb_not_multi_backend_enabled def test_create_bnb_peft_model_from_config(self): r""" Simply creates a peft model and checks that it can be loaded. """ from bitsandbytes.nn import Linear8bitLt trl_model = AutoModelForCausalLMWithValueHead.from_pretrained( self.causal_lm_model_id, peft_config=self.lora_config, load_in_8bit=True ) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) self.assertIsInstance(trl_model.pretrained_model.model.model.layers[0].mlp.gate_proj, Linear8bitLt) causal_lm_model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, load_in_8bit=True, device_map="auto" ) trl_model = AutoModelForCausalLMWithValueHead.from_pretrained(causal_lm_model, peft_config=self.lora_config) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in trl_model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905) self.assertIsInstance(trl_model.pretrained_model.model.model.layers[0].mlp.gate_proj, Linear8bitLt) def test_save_pretrained_peft(self): r""" Check that the model can be saved and loaded properly. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) model.save_pretrained(self.tmp_dir) # check that the files `adapter_model.safetensors` and `adapter_config.json` are in the directory self.assertTrue( os.path.isfile(f"{self.tmp_dir}/adapter_model.safetensors"), f"{self.tmp_dir}/adapter_model.safetensors does not exist", ) self.assertTrue( os.path.exists(f"{self.tmp_dir}/adapter_config.json"), f"{self.tmp_dir}/adapter_config.json does not exist" ) # check also for `pytorch_model.bin` and make sure it only contains `v_head` weights self.assertTrue( os.path.exists(f"{self.tmp_dir}/pytorch_model.bin"), f"{self.tmp_dir}/pytorch_model.bin does not exist" ) # check that only keys that starts with `v_head` are in the dict maybe_v_head = torch.load(f"{self.tmp_dir}/pytorch_model.bin", weights_only=True) self.assertTrue( all(k.startswith("v_head") for k in maybe_v_head.keys()), f"keys in {self.tmp_dir}/pytorch_model.bin do not start with `v_head`", ) model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(self.tmp_dir) # check all the weights are the same for p1, p2 in zip(model.named_parameters(), model_from_pretrained.named_parameters()): self.assertTrue(torch.allclose(p1[1], p2[1]), f"{p1[0]} != {p2[0]}") def test_load_pretrained_peft(self): r""" Check that the model saved with peft class interface can be loaded properly. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) model = AutoModelForCausalLMWithValueHead.from_pretrained(pretrained_model) pretrained_model.save_pretrained(self.tmp_dir) model_from_pretrained = AutoModelForCausalLMWithValueHead.from_pretrained(self.tmp_dir) # check that the files `adapter_model.safetensors` and `adapter_config.json` are in the directory self.assertTrue( os.path.isfile(f"{self.tmp_dir}/adapter_model.safetensors"), f"{self.tmp_dir}/adapter_model.safetensors does not exist", ) self.assertTrue( os.path.exists(f"{self.tmp_dir}/adapter_config.json"), f"{self.tmp_dir}/adapter_config.json does not exist" ) # check all the weights are the same for p1, p2 in zip(model.named_parameters(), model_from_pretrained.named_parameters()): if p1[0] not in ["v_head.summary.weight", "v_head.summary.bias"]: self.assertTrue(torch.allclose(p1[1], p2[1]), f"{p1[0]} != {p2[0]}") def test_continue_training_peft_model(self): r""" Load peft and checks that it can continue training. """ causal_lm_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id) pretrained_model = get_peft_model(causal_lm_model, self.lora_config) pretrained_model.save_pretrained(self.tmp_dir) # set is_trainable to True model = AutoModelForCausalLMWithValueHead.from_pretrained(self.tmp_dir, is_trainable=True) # Check that the number of trainable parameters is correct nb_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad) self.assertEqual(nb_trainable_params, 905)
trl/tests/test_peft_models.py/0
{ "file_path": "trl/tests/test_peft_models.py", "repo_id": "trl", "token_count": 3884 }
554
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..import_utils import OptionalDependencyNotAvailable, _LazyModule, is_diffusers_available _import_structure = { "activation_offloading": ["get_act_offloading_ctx_manager"], "modeling_base": ["GeometricMixtureWrapper", "PreTrainedModelWrapper", "create_reference_model"], "modeling_value_head": ["AutoModelForCausalLMWithValueHead", "AutoModelForSeq2SeqLMWithValueHead"], "utils": [ "SUPPORTED_ARCHITECTURES", "clone_chat_template", "prepare_deepspeed", "prepare_fsdp", "prepare_peft_model", "setup_chat_format", "unwrap_model_for_generation", ], } try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_sd_base"] = [ "DDPOPipelineOutput", "DDPOSchedulerOutput", "DDPOStableDiffusionPipeline", "DefaultDDPOStableDiffusionPipeline", ] if TYPE_CHECKING: from .activation_offloading import get_act_offloading_ctx_manager from .modeling_base import GeometricMixtureWrapper, PreTrainedModelWrapper, create_reference_model from .modeling_value_head import AutoModelForCausalLMWithValueHead, AutoModelForSeq2SeqLMWithValueHead from .utils import ( SUPPORTED_ARCHITECTURES, clone_chat_template, prepare_deepspeed, prepare_fsdp, prepare_peft_model, setup_chat_format, unwrap_model_for_generation, ) try: if not is_diffusers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sd_base import ( DDPOPipelineOutput, DDPOSchedulerOutput, DDPOStableDiffusionPipeline, DefaultDDPOStableDiffusionPipeline, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
trl/trl/models/__init__.py/0
{ "file_path": "trl/trl/models/__init__.py", "repo_id": "trl", "token_count": 1015 }
555
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # /// script # dependencies = [ # "trl @ git+https://github.com/huggingface/trl.git", # "peft", # ] # /// """ Run the KTO training script with the commands below. In general, the optimal configuration for KTO will be similar to that of DPO. # Full training: ```bash python trl/scripts/kto.py \ --dataset_name trl-lib/kto-mix-14k \ --model_name_or_path=trl-lib/qwen1.5-1.8b-sft \ --per_device_train_batch_size 16 \ --num_train_epochs 1 \ --learning_rate 5e-7 \ --lr_scheduler_type=cosine \ --gradient_accumulation_steps 1 \ --eval_steps 500 \ --output_dir=kto-aligned-model \ --warmup_ratio 0.1 \ --report_to wandb \ --logging_first_step ``` # QLoRA: ```bash # QLoRA: python trl/scripts/kto.py \ --dataset_name trl-lib/kto-mix-14k \ --model_name_or_path=trl-lib/qwen1.5-1.8b-sft \ --per_device_train_batch_size 8 \ --num_train_epochs 1 \ --learning_rate 5e-7 \ --lr_scheduler_type=cosine \ --gradient_accumulation_steps 1 \ --eval_steps 500 \ --output_dir=kto-aligned-model-lora \ --warmup_ratio 0.1 \ --report_to wandb \ --logging_first_step \ --use_peft \ --load_in_4bit \ --lora_target_modules=all-linear \ --lora_r=16 \ --lora_alpha=16 ``` """ import argparse from accelerate import logging from datasets import load_dataset from transformers import AutoModelForCausalLM, AutoTokenizer from trl import ( DatasetMixtureConfig, KTOConfig, KTOTrainer, ModelConfig, ScriptArguments, TrlParser, get_dataset, get_peft_config, ) logger = logging.get_logger(__name__) def main(script_args, training_args, model_args, dataset_args): # Load a pretrained model model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) ref_model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, trust_remote_code=model_args.trust_remote_code ) if tokenizer.pad_token is None: tokenizer.pad_token = tokenizer.eos_token # Load the dataset if dataset_args.datasets and script_args.dataset_name: logger.warning( "Both `datasets` and `dataset_name` are provided. The `datasets` argument will be used to load the " "dataset and `dataset_name` will be ignored." ) elif dataset_args.datasets and not script_args.dataset_name: dataset = get_dataset(dataset_args) elif not dataset_args.datasets and script_args.dataset_name: dataset = load_dataset( script_args.dataset_name, name=script_args.dataset_config, streaming=script_args.dataset_streaming ) else: raise ValueError("Either `datasets` or `dataset_name` must be provided.") # Initialize the KTO trainer trainer = KTOTrainer( model, ref_model, args=training_args, train_dataset=dataset[script_args.dataset_train_split], eval_dataset=dataset[script_args.dataset_test_split] if training_args.eval_strategy != "no" else None, processing_class=tokenizer, peft_config=get_peft_config(model_args), ) # Train the model trainer.train() # Save and push to Hub trainer.save_model(training_args.output_dir) if training_args.push_to_hub: trainer.push_to_hub(dataset_name=script_args.dataset_name) def make_parser(subparsers: argparse._SubParsersAction = None): dataclass_types = (ScriptArguments, KTOConfig, ModelConfig, DatasetMixtureConfig) if subparsers is not None: parser = subparsers.add_parser("kto", help="Run the KTO training script", dataclass_types=dataclass_types) else: parser = TrlParser(dataclass_types) return parser if __name__ == "__main__": parser = make_parser() # When using the trl cli, this script may be run with additional arguments, corresponding accelerate arguments. # To ensure that their parsing does not interfere with the script arguments, parse the arguments with # `return_remaining_strings=True`, then ignore the remaining strings. script_args, training_args, model_args, dataset_args, _ = parser.parse_args_and_config( return_remaining_strings=True ) main(script_args, training_args, model_args, dataset_args)
trl/trl/scripts/kto.py/0
{ "file_path": "trl/trl/scripts/kto.py", "repo_id": "trl", "token_count": 1996 }
556
# Copyright 2020-2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import os import random import textwrap from collections import defaultdict from contextlib import contextmanager, nullcontext from dataclasses import dataclass from pathlib import Path from typing import Any, Callable, Literal, Optional, Union import pandas as pd import torch import torch.nn as nn import torch.nn.functional as F from accelerate import PartialState, logging from accelerate.utils import tqdm from datasets import Dataset, IterableDataset from torch import autocast from torch.utils.data import DataLoader from transformers import ( AutoModelForCausalLM, AutoTokenizer, BaseImageProcessor, DataCollator, FeatureExtractionMixin, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, Trainer, ) from transformers.data.data_collator import DataCollatorMixin from transformers.integrations import ( is_comet_available, is_mlflow_available, is_wandb_available, ) from transformers.models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES from transformers.trainer_callback import TrainerCallback from transformers.trainer_utils import EvalLoopOutput from transformers.utils import is_liger_kernel_available, is_peft_available from ..data_utils import maybe_apply_chat_template, maybe_extract_prompt from ..models import create_reference_model, prepare_deepspeed from ..models.utils import prepare_fsdp from .callbacks import SyncRefModelCallback from .dpo_config import DPOConfig, FDivergenceConstants, FDivergenceType from .utils import ( RunningMoments, cap_exp, disable_dropout_in_model, empty_cache, flush_left, flush_right, generate_model_card, get_comet_experiment_url, log_table_to_comet_experiment, pad, pad_to_length, peft_module_casting_to_bf16, selective_log_softmax, ) if is_peft_available(): from peft import ( PeftConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training, ) if is_liger_kernel_available(): from liger_kernel.chunked_loss import LigerFusedLinearDPOLoss if is_wandb_available(): import wandb if is_mlflow_available(): import mlflow logger = logging.get_logger(__name__) def shift_tokens_right(input_ids: torch.Tensor, decoder_start_token_id: int) -> torch.Tensor: """Shift input ids one token to the right, and pad with pad_token_id""" shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id @dataclass class DataCollatorForPreference(DataCollatorMixin): """ Data collator used for preference data. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: pad_token_id (`int`): Token ID to use for padding. return_tensors (`str`, *optional*, defaults to `"pt"`): Type of Tensor to return. Only `"pt"` is currently supported. Examples: ```python >>> from trl import DataCollatorForPreference >>> collator = DataCollatorForPreference(pad_token_id=0) >>> examples = [ ... {"prompt_input_ids": [1, 2, 3], "chosen_input_ids": [4, 5], "rejected_input_ids": [6]}, ... {"prompt_input_ids": [7, 8], "chosen_input_ids": [9, 10], "rejected_input_ids": [11, 12, 13]}, ... ] >>> collator(examples) {'prompt_input_ids': tensor([[1, 2, 3], [0, 7, 8]]), 'prompt_attention_mask': tensor([[1, 1, 1], [0, 1, 1]]), 'chosen_input_ids': tensor([[ 4, 5], [ 9, 10]]), 'chosen_attention_mask': tensor([[1, 1], [1, 1]]), 'rejected_input_ids': tensor([[ 6, 0, 0], [11, 12, 13]]), 'rejected_attention_mask': tensor([[1, 0, 0], [1, 1, 1]]) } ``` """ pad_token_id: int return_tensors: str = "pt" def torch_call(self, examples: list[Union[list[int], Any, dict[str, Any]]]) -> dict[str, Any]: # Convert to tensor prompt_input_ids = [torch.tensor(example["prompt_input_ids"]) for example in examples] prompt_attention_mask = [torch.ones_like(input_ids) for input_ids in prompt_input_ids] chosen_input_ids = [torch.tensor(example["chosen_input_ids"]) for example in examples] chosen_attention_mask = [torch.ones_like(input_ids) for input_ids in chosen_input_ids] rejected_input_ids = [torch.tensor(example["rejected_input_ids"]) for example in examples] rejected_attention_mask = [torch.ones_like(input_ids) for input_ids in rejected_input_ids] if "pixel_values" in examples[0]: pixel_values = [torch.tensor(example["pixel_values"]) for example in examples] if "pixel_attention_mask" in examples[0]: pixel_attention_mask = [torch.tensor(example["pixel_attention_mask"]) for example in examples] if "ref_chosen_logps" in examples[0] and "ref_rejected_logps" in examples[0]: ref_chosen_logps = torch.tensor([example["ref_chosen_logps"] for example in examples]) ref_rejected_logps = torch.tensor([example["ref_rejected_logps"] for example in examples]) # Pad output = {} output["prompt_input_ids"] = pad(prompt_input_ids, padding_value=self.pad_token_id, padding_side="left") output["prompt_attention_mask"] = pad(prompt_attention_mask, padding_value=0, padding_side="left") output["chosen_input_ids"] = pad(chosen_input_ids, padding_value=self.pad_token_id) output["chosen_attention_mask"] = pad(chosen_attention_mask, padding_value=0) output["rejected_input_ids"] = pad(rejected_input_ids, padding_value=self.pad_token_id) output["rejected_attention_mask"] = pad(rejected_attention_mask, padding_value=0) if "pixel_values" in examples[0]: output["pixel_values"] = pad(pixel_values, padding_value=0.0) if "pixel_attention_mask" in examples[0]: output["pixel_attention_mask"] = pad(pixel_attention_mask, padding_value=0) if "image_sizes" in examples[0]: output["image_sizes"] = torch.tensor([example["image_sizes"] for example in examples]) if "ref_chosen_logps" in examples[0] and "ref_rejected_logps" in examples[0]: output["ref_chosen_logps"] = ref_chosen_logps output["ref_rejected_logps"] = ref_rejected_logps return output class DPOTrainer(Trainer): """ Trainer for Direct Preference Optimization (DPO) method. This class is a wrapper around the [`transformers.Trainer`] class and inherits all of its attributes and methods. Args: model (`Union[str, PreTrainedModel]`): Model to be trained. Can be either: - A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or a path to a *directory* containing model weights saved using [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is loaded using [`~transformers.AutoModelForCausalLM.from_pretrained`] with the keyword arguments in `args.model_init_kwargs`. - A [`~transformers.PreTrainedModel`] object. Only causal language models are supported. ref_model (`PreTrainedModelWrapper`): Hugging Face transformer model with a casual language modelling head. Used for implicit reward computation and loss. If no reference model is provided, the trainer will create a reference model with the same architecture as the model to be optimized. args ([`DPOConfig`], *optional*, defaults to `None`): Configuration for this trainer. If `None`, a default configuration is used. data_collator (`DataCollator`, *optional*): Function to use to form a batch from a list of elements of the processed `train_dataset` or `eval_dataset`. Will default to [`DataCollatorForPreference`]. train_dataset ([`~datasets.Dataset`] or [`~datasets.IterableDataset`]): Dataset to use for training. DPO supports [preference](#preference) type and. The format of the samples can be either: - [Standard](dataset_formats#standard): Each sample contains plain text. - [Conversational](dataset_formats#conversational): Each sample contains structured messages (e.g., role and content). eval_dataset ([`~datasets.Dataset`], [`~datasets.IterableDataset`] or `dict[str, Union[Dataset, IterableDataset]]`): Dataset to use for evaluation. It must meet the same requirements as `train_dataset`. processing_class ([`~transformers.PreTrainedTokenizerBase`], [`~transformers.BaseImageProcessor`], [`~transformers.FeatureExtractionMixin`] or [`~transformers.ProcessorMixin`], *optional*, defaults to `None`): Processing class used to process the data. If `None`, the processing class is loaded from the model's name with [`~transformers.AutoTokenizer.from_pretrained`]. compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*): The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return a dictionary string to metric values. *Note* When passing TrainingArgs with `batch_eval_metrics` set to `True`, your compute_metrics function must take a boolean `compute_result` argument. This will be triggered after the last eval batch to signal that the function needs to calculate and return the global summary statistics rather than accumulating the batch-level statistics. callbacks (list of [`~transformers.TrainerCallback`], *optional*, defaults to `None`): List of callbacks to customize the training loop. Will add those to the list of default callbacks detailed in [here](https://huggingface.co/docs/transformers/main_classes/callback). If you want to remove one of the default callbacks used, use the [`~transformers.Trainer.remove_callback`] method. optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`): A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`. optimizer_cls_and_kwargs (`Tuple[Type[torch.optim.Optimizer], Dict[str, Any]]`, *optional*, defaults to `None`): A tuple containing the optimizer class and keyword arguments to use. Overrides `optim` and `optim_args` in `args`. Incompatible with the `optimizers` argument. preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*, defaults to `None`): A function that preprocess the logits right before caching them at each evaluation step. Must take two tensors, the logits and the labels, and return the logits once processed as desired. The modifications made by this function will be reflected in the predictions received by `compute_metrics`. Note that the labels (second parameter) will be `None` if the dataset does not have them. peft_config ([`~peft.PeftConfig`], *optional*, defaults to `None`): PEFT configuration used to wrap the model. If `None`, the model is not wrapped. """ _tag_names = ["trl", "dpo"] def __init__( self, model: Union[str, nn.Module, PreTrainedModel], ref_model: Optional[Union[PreTrainedModel, nn.Module, str]] = None, args: Optional[DPOConfig] = None, data_collator: Optional[DataCollator] = None, # type: ignore train_dataset: Optional[Union[Dataset, IterableDataset]] = None, eval_dataset: Optional[Union[Dataset, IterableDataset, dict[str, Union[Dataset, IterableDataset]]]] = None, processing_class: Optional[ Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin] ] = None, compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None, callbacks: Optional[list[TrainerCallback]] = None, optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]] = (None, None), optimizer_cls_and_kwargs: Optional[tuple[type[torch.optim.Optimizer], dict[str, Any]]] = None, preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, peft_config: Optional["PeftConfig"] = None, ): # Args model_id = model if isinstance(model, str) else model.config._name_or_path if args is None: model_name = model_id.split("/")[-1] args = DPOConfig(f"{model_name}-DPO") # Handle the tokenizer if processing_class is None: processing_class = AutoTokenizer.from_pretrained(model_id) if args.padding_value is not None: self.padding_value = args.padding_value else: if hasattr(processing_class, "pad_token_id") and processing_class.pad_token_id is not None: self.padding_value = processing_class.pad_token_id elif hasattr(processing_class, "tokenizer") and processing_class.tokenizer.pad_token_id is not None: self.padding_value = processing_class.tokenizer.pad_token_id else: raise ValueError( "`padding_value` is not specified in `DPOConfig`, and `pad_token_id` is missing in the " "`processing_class`. Please either set the `padding_value` argument in `DPOConfig`, or set " "`tokenizer.pad_token` (e.g., `tokenizer.pad_token = tokenizer.eos_token`) before instantiating " "the trainer." ) # Model if not isinstance(model, str) and ref_model is model: raise ValueError( "`model` and `ref_model` cannot be the same object. If you want `ref_model` to be the " "same as `model`, you must mass a copy of it, or `None` if you use peft." ) if args.model_init_kwargs is not None and not isinstance(model, str): logger.warning( "You passed model_init_kwargs to the `DPOConfig`, but your model is already instantiated. " "The `model_init_kwargs` will be ignored." ) if isinstance(model, str): model = self._create_model_from_path(model, args) if args.ref_model_init_kwargs is not None and not isinstance(ref_model, str): logger.warning( "You passed ref_model_init_kwargs to the `DPOConfig`, but your ref_model is already instantiated. " "The `ref_model_init_kwargs` will be ignored." ) if isinstance(ref_model, str): ref_model = self._create_model_from_path(ref_model, args, is_ref=True) # PEFT configuration and model wrapping model = self._prepare_peft_model(model, ref_model, peft_config, args) if args.generate_during_eval and not (is_wandb_available() or is_comet_available() or is_mlflow_available()): raise ValueError( "`generate_during_eval=True` requires Weights and Biases, MLFlow or Comet to be installed." " Please install `wandb`, `mlflow` or `comet-ml` to resolve." ) self.is_encoder_decoder = model.config.is_encoder_decoder self.is_vision_model = model.config.model_type in MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES.keys() self.is_peft_model = is_peft_available() and isinstance(model, PeftModel) self.model_adapter_name = args.model_adapter_name self.ref_adapter_name = args.ref_adapter_name self.reference_free = args.reference_free if ref_model: self.ref_model = ref_model elif self.is_peft_model or args.precompute_ref_log_probs: # The `model` with adapters turned off will be used as the reference model self.ref_model = None else: self.ref_model = create_reference_model(model) # Disable dropout in the model and reference model if args.disable_dropout: disable_dropout_in_model(model) if self.ref_model is not None: disable_dropout_in_model(self.ref_model) # Liger kernel if args.use_liger_loss: if not is_liger_kernel_available(): raise ImportError( "You set `use_liger_loss=True` but the liger kernel is not available. " "Please install liger-kernel first: `pip install liger-kernel`" ) if args.loss_type != "sigmoid": raise ValueError( "You set `use_liger_loss=True` but the loss type is not `sigmoid`. " "Please set `loss_type='sigmoid'` to use the liger kernel." ) self.dpo_loss_fn = LigerFusedLinearDPOLoss( ignore_index=args.label_pad_token_id, beta=args.beta, use_ref_model=not args.reference_free, average_log_prob=False, ) # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the # input tensor associated with the key "input_ids". However, in DPO, the sampled data does not include the # "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and # "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens # of the input, floating-point operations will not be computed." To suppress this warning, we set the # "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate # that the warning has already been issued. model.warnings_issued["estimate_tokens"] = True # Data collator if data_collator is None: data_collator = DataCollatorForPreference(pad_token_id=self.padding_value) self.generate_during_eval = args.generate_during_eval self.label_pad_token_id = args.label_pad_token_id self.max_prompt_length = args.max_prompt_length self.max_completion_length = args.max_completion_length self.max_length = args.max_length self.truncation_mode = args.truncation_mode self.precompute_ref_log_probs = args.precompute_ref_log_probs self.use_logits_to_keep = args.use_logits_to_keep if args.padding_free: if model.config._attn_implementation != "flash_attention_2": logger.warning( "Padding-free training is enabled, but the attention implementation is not set to " "'flash_attention_2'. Padding-free training flattens batches into a single sequence, and " "'flash_attention_2' is the only known attention mechanism that reliably supports this. Using " "other implementations may lead to unexpected behavior. To ensure compatibility, set " "`attn_implementation='flash_attention_2'` in the model configuration, or verify that your " "attention mechanism can handle flattened sequences." ) if args.per_device_train_batch_size == 1: logger.warning( "You are using a per_device_train_batch_size of 1 with padding-free training. Using a batch size " "of 1 anihilate the benefits of padding-free training. Please consider increasing the batch size " "to at least 2." ) self.padding_free = args.padding_free # Since ref_logs are precomputed on the first call to get_train/eval_dataloader # keep track of first called to avoid computation of future calls self._precomputed_train_ref_log_probs = False self._precomputed_eval_ref_log_probs = False self.beta = args.beta self.label_smoothing = args.label_smoothing self.loss_type = args.loss_type if isinstance(args.loss_type, list) else [args.loss_type] self.loss_weights = args.loss_weights self.aux_loss_enabled = getattr(model.config, "output_router_logits", False) self.use_weighting = args.use_weighting self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0) if self.aux_loss_enabled and self.aux_loss_coef == 0.0: logger.warning( "You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to " "`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value " "greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary " "loss.", ) for loss_type in self.loss_type: if ( loss_type in ["hinge", "ipo", "bco_pair", "sppo_hard", "nca_pair", "apo_zero", "apo_down"] and args.label_smoothing > 0 ): logger.warning( f"You are using the {loss_type} loss type that does not support label smoothing. The " "`label_smoothing` parameter will be ignored. Set `label_smoothing` to `0.0` to remove this " "warning.", ) if loss_type == "kto_pair": raise ValueError("Support for kto_pair has been removed in DPOTrainer. Please use KTOTrainer.") self._stored_metrics = defaultdict(lambda: defaultdict(list)) self.f_divergence_type = args.f_divergence_type self.f_divergence_params = {FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY: args.f_alpha_divergence_coef} self.dataset_num_proc = args.dataset_num_proc # Dataset preparation train_dataset = self._prepare_dataset(train_dataset, processing_class, args, "train") if eval_dataset is not None: if isinstance(eval_dataset, dict): eval_dataset = { key: self._prepare_dataset(dataset, processing_class, args, key) for key, dataset in eval_dataset.items() } else: eval_dataset = self._prepare_dataset(eval_dataset, processing_class, args, "eval") super().__init__( model=model, args=args, data_collator=data_collator, train_dataset=train_dataset, eval_dataset=eval_dataset, processing_class=processing_class, compute_metrics=compute_metrics, callbacks=callbacks, optimizers=optimizers, optimizer_cls_and_kwargs=optimizer_cls_and_kwargs, preprocess_logits_for_metrics=preprocess_logits_for_metrics, ) # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the # model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set # self.model_accepts_loss_kwargs to False to enable scaling. self.model_accepts_loss_kwargs = False # Add tags for models that have been loaded with the correct transformers version if hasattr(self.model, "add_model_tags"): self.model.add_model_tags(self._tag_names) if not hasattr(self, "accelerator"): raise AttributeError( "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`." ) # Deepspeed Zero-3 does not support precompute_ref_log_probs if self.is_deepspeed_enabled: if self.accelerator.state.deepspeed_plugin.zero_stage == 3 and self.precompute_ref_log_probs: raise ValueError( "You cannot use `precompute_ref_log_probs=True` with Deepspeed ZeRO-3. Please set `precompute_ref_log_probs=False`." ) if self.ref_model is None: if not (self.is_peft_model or self.precompute_ref_log_probs): raise ValueError( "No reference model and model is not a Peft model. Try setting `precompute_ref_log_probs=True`" ) if args.sync_ref_model: raise ValueError( "You currently cannot use `ref_model=None` with TR-DPO method. Please provide `ref_model`." ) else: if self.is_deepspeed_enabled: self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator) elif self.is_fsdp_enabled: self.ref_model = prepare_fsdp(self.ref_model, self.accelerator) else: self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) if args.sync_ref_model: if self.precompute_ref_log_probs: raise ValueError( "You cannot use `precompute_ref_log_probs=True` with TR-DPO method. Please set `precompute_ref_log_probs=False`." ) self.add_callback(SyncRefModelCallback(ref_model=self.ref_model, accelerator=self.accelerator)) if "bco_pair" in self.loss_type: self.running = RunningMoments(self.accelerator) def _create_model_from_path(self, model_path: str, args: DPOConfig, is_ref: bool = False) -> PreTrainedModel: """Creates a model from a path or model identifier.""" if not is_ref: model_init_kwargs = args.model_init_kwargs or {} else: model_init_kwargs = args.ref_model_init_kwargs or {} # Handle torch dtype torch_dtype = model_init_kwargs.get("torch_dtype") if isinstance(torch_dtype, torch.dtype) or torch_dtype == "auto" or torch_dtype is None: pass # torch_dtype is already a torch.dtype or "auto" or None elif isinstance(torch_dtype, str): # it's a str, but not "auto" torch_dtype = getattr(torch, torch_dtype) model_init_kwargs["torch_dtype"] = torch_dtype else: raise ValueError( "Invalid `torch_dtype` passed to `DPOConfig`. Expected either 'auto' or a string representing " f"a `torch.dtype` (e.g., 'float32'), but got {torch_dtype}." ) # Create model model = AutoModelForCausalLM.from_pretrained(model_path, **model_init_kwargs) return model def _prepare_peft_model( self, model: PreTrainedModel, ref_model: PreTrainedModel, peft_config: Any, args: DPOConfig ) -> PreTrainedModel: """Prepares a model for PEFT training.""" # Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16` # has been called in order to properly call autocast if needed. self._peft_has_been_casted_to_bf16 = False if not is_peft_available() and peft_config is not None: raise ValueError( "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models" ) elif is_peft_available() and peft_config is not None: # if model is a peft model and we have a peft_config, we merge and unload it first if isinstance(model, PeftModel): model = model.merge_and_unload() if ref_model is not None and not args.force_use_ref_model: raise ValueError( "You passed both a ref_model and a peft_config. For training PEFT adapters with DPO there is no need to pass a reference" " model. Please pass `ref_model=None` in case you want to train PEFT adapters, or pass a ref_model with `force_use_ref_model=True` in DPOTrainer's init." " if you want to use a different ref_model." ) if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False): _support_gc_kwargs = hasattr( args, "gradient_checkpointing_kwargs" ) and "gradient_checkpointing_kwargs" in list( inspect.signature(prepare_model_for_kbit_training).parameters ) prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing} if _support_gc_kwargs: prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs model = prepare_model_for_kbit_training(model, **prepare_model_kwargs) else: model = self._prepare_gradient_checkpointing(model, args) # get peft model with the given config model = get_peft_model(model, peft_config) if args.bf16 and getattr(model, "is_loaded_in_4bit", False): peft_module_casting_to_bf16(model) # If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager self._peft_has_been_casted_to_bf16 = True else: model = self._prepare_gradient_checkpointing(model, args) return model def _prepare_gradient_checkpointing(self, model: PreTrainedModel, args: DPOConfig): """Prepare the gradienting checkpointing for the model.""" # For models that use gradient_checkpointing, we need to attach a hook that enables input # to explicitly have `requires_grad=True`, otherwise training will either silently # fail or completely fail. if args.gradient_checkpointing: # For backward compatibility with older versions of transformers if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) return model def _prepare_dataset( self, dataset: Union[Dataset, IterableDataset], processing_class: Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin], args: DPOConfig, dataset_name: str, ) -> Union[Dataset, IterableDataset]: # Build the kwargs for the `map` function map_kwargs = {} if isinstance(dataset, Dataset): # IterableDataset does not support num_proc nor writer_batch_size map_kwargs["num_proc"] = args.dataset_num_proc map_kwargs["writer_batch_size"] = 10 with PartialState().main_process_first(): # Extract prompt if needed if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc` map_kwargs["desc"] = f"Extracting prompt in {dataset_name} dataset" dataset = dataset.map(maybe_extract_prompt, **map_kwargs) # Apply the chat template if needed if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc` map_kwargs["desc"] = f"Applying chat template to {dataset_name} dataset" dataset = dataset.map( maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class, "tools": args.tools}, **map_kwargs ) # Tokenize the dataset if isinstance(dataset, Dataset): # `IterableDataset.map` does not support `desc` map_kwargs["desc"] = f"Tokenizing {dataset_name} dataset" dataset = dataset.map( self.tokenize_row if not self.is_vision_model else self.process_row, remove_columns=["chosen", "rejected"], fn_kwargs={ "processing_class": processing_class, "max_prompt_length": args.max_prompt_length, "max_completion_length": args.max_completion_length, # for enc-dec, we add the special tokens ([bos_token] + prompt + [eos_token]; completion + [eos_token]) "add_special_tokens": False, }, **map_kwargs, ) return dataset @staticmethod def tokenize_row( features: dict[str, str], processing_class: PreTrainedTokenizerBase, max_prompt_length: Optional[int] = None, max_completion_length: Optional[int] = None, add_special_tokens: bool = True, ) -> dict[str, list[int]]: """ Tokenize a row of the dataset. Args: features (`dict[str, str]`): Row of the dataset, should contain the keys `"prompt"`, `"chosen"`, and `"rejected"`. processing_class (`PreTrainedTokenizerBase`): Processing class used to process the data. max_prompt_length (`int` or `None`): Maximum length of the prompt sequence. If `None`, the prompt sequence is not truncated. max_completion_length (`int` or `None`): Maximum length of the completion sequences. If `None`, the completion sequences are not truncated. add_special_tokens (`bool`): Whether to add special tokens to the sequences. Typically used for encoder-decoder models. If `True`, the prompt sequence will have a bos token prepended and an eos token appended. In any case, the completion sequences will have an eos token appended. Returns: `dict[str, list[int]]`: Tokenized sequences with the keys `"prompt_input_ids"`, `"chosen_input_ids"`, and `"rejected_input_ids". Example: ```python >>> from transformers import GPT2Tokenizer >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2") >>> features = {"prompt": "The sky is", "chosen": " blue", "rejected": " green"} >>> DPOTrainer.tokenize_row( ... features, tokenizer, max_prompt_length=3, max_completion_length=3, add_special_tokens=False ... ) {'prompt_input_ids': [464, 6766, 318], 'chosen_input_ids': [4171, 50256], 'rejected_input_ids': [4077, 50256]} ``` """ tokenizer = processing_class # the processing class is a tokenizer prompt_input_ids = tokenizer(features["prompt"], add_special_tokens=False)["input_ids"] chosen_input_ids = tokenizer(features["chosen"], add_special_tokens=False)["input_ids"] rejected_input_ids = tokenizer(features["rejected"], add_special_tokens=False)["input_ids"] # Add special tokens (typically for encoder-decoder models) if add_special_tokens: if tokenizer.bos_token_id is not None: prompt_input_ids = [tokenizer.bos_token_id] + prompt_input_ids if tokenizer.eos_token_id is not None: prompt_input_ids = prompt_input_ids + [tokenizer.eos_token_id] chosen_input_ids = chosen_input_ids + [tokenizer.eos_token_id] rejected_input_ids = rejected_input_ids + [tokenizer.eos_token_id] # Truncate prompt and completion sequences if max_prompt_length is not None: prompt_input_ids = prompt_input_ids[-max_prompt_length:] if max_completion_length is not None: chosen_input_ids = chosen_input_ids[:max_completion_length] rejected_input_ids = rejected_input_ids[:max_completion_length] return { "prompt_input_ids": prompt_input_ids, "chosen_input_ids": chosen_input_ids, "rejected_input_ids": rejected_input_ids, } @staticmethod def process_row( features: dict[str, str], processing_class: PreTrainedTokenizerBase, max_prompt_length: Optional[int] = None, max_completion_length: Optional[int] = None, add_special_tokens: bool = True, ) -> dict[str, list[int]]: """ Same as `tokenize_row` but for vision models. Please refer to `tokenize_row` for more information. """ processor, tokenizer = processing_class, processing_class.tokenizer # the processing class is a processor processed_features = processor(images=features["images"], text=features["prompt"], add_special_tokens=False) prompt_input_ids = processed_features["input_ids"][0] pixel_values = processed_features["pixel_values"][0] chosen_input_ids = tokenizer(features["chosen"], add_special_tokens=False)["input_ids"] rejected_input_ids = tokenizer(features["rejected"], add_special_tokens=False)["input_ids"] # Add special tokens (typically for encoder-decoder models) if add_special_tokens: if tokenizer.bos_token_id is not None: prompt_input_ids = [tokenizer.bos_token_id] + prompt_input_ids if tokenizer.eos_token_id is not None: prompt_input_ids = prompt_input_ids + [tokenizer.eos_token_id] chosen_input_ids = chosen_input_ids + [tokenizer.eos_token_id] rejected_input_ids = rejected_input_ids + [tokenizer.eos_token_id] # Truncate prompt and completion sequences if max_prompt_length is not None: prompt_input_ids = prompt_input_ids[-max_prompt_length:] if max_completion_length is not None: chosen_input_ids = chosen_input_ids[:max_completion_length] rejected_input_ids = rejected_input_ids[:max_completion_length] output = { "prompt_input_ids": prompt_input_ids, "pixel_values": pixel_values, "chosen_input_ids": chosen_input_ids, "rejected_input_ids": rejected_input_ids, } if "pixel_attention_mask" in processed_features: output["pixel_attention_mask"] = processed_features["pixel_attention_mask"][0] if "image_sizes" in processed_features: output["image_sizes"] = processed_features["image_sizes"][0] return output def _set_signature_columns_if_needed(self): # If `self.args.remove_unused_columns` is True, non-signature columns are removed. # By default, this method sets `self._signature_columns` to the model's expected inputs. # In DPOTrainer, we preprocess data, so using the model's signature columns doesn't work. # Instead, we set them to the columns expected by `DataCollatorForPreference`, hence the override. if self._signature_columns is None: self._signature_columns = [ "prompt_input_ids", "chosen_input_ids", "rejected_input_ids", "image_sizes", "ref_chosen_logps", "ref_rejected_logps", ] def get_train_dataloader(self) -> DataLoader: """ Returns the training [`~torch.utils.data.DataLoader`]. Subclass of transformers.src.transformers.trainer.get_train_dataloader to precompute `ref_log_probs`. """ if self.precompute_ref_log_probs and not self._precomputed_train_ref_log_probs: batch_size = self.args.precompute_ref_batch_size or self.args.per_device_train_batch_size dataloader_params = { "batch_size": batch_size, "collate_fn": self.data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "shuffle": False, } # prepare dataloader data_loader = self.accelerator.prepare(DataLoader(self.train_dataset, **dataloader_params)) ref_chosen_logps = [] ref_rejected_logps = [] for padded_batch in tqdm(iterable=data_loader, desc="Train dataset reference log probs"): ref_chosen_logp, ref_rejected_logp = self.compute_ref_log_probs(padded_batch) ref_chosen_logp, ref_rejected_logp = self.accelerator.gather_for_metrics( (ref_chosen_logp, ref_rejected_logp) ) ref_chosen_logps.append(ref_chosen_logp.cpu()) ref_rejected_logps.append(ref_rejected_logp.cpu()) # Unnecessary cache clearing to avoid OOM empty_cache() self.accelerator.free_memory() all_ref_chosen_logps = torch.cat(ref_chosen_logps).float().numpy() all_ref_rejected_logps = torch.cat(ref_rejected_logps).float().numpy() self.train_dataset = self.train_dataset.add_column(name="ref_chosen_logps", column=all_ref_chosen_logps) self.train_dataset = self.train_dataset.add_column( name="ref_rejected_logps", column=all_ref_rejected_logps ) self._precomputed_train_ref_log_probs = True return super().get_train_dataloader() def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: """ Returns the evaluation [`~torch.utils.data.DataLoader`]. Subclass of transformers.src.transformers.trainer.get_eval_dataloader to precompute `ref_log_probs`. Args: eval_dataset (`torch.utils.data.Dataset`, *optional*): If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted by the `model.forward()` method are automatically removed. It must implement `__len__`. """ if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset if self.precompute_ref_log_probs and not self._precomputed_eval_ref_log_probs: batch_size = self.args.precompute_ref_batch_size or self.args.per_device_eval_batch_size dataloader_params = { "batch_size": batch_size, "collate_fn": self.data_collator, "num_workers": self.args.dataloader_num_workers, "pin_memory": self.args.dataloader_pin_memory, "shuffle": False, } # prepare dataloader data_loader = self.accelerator.prepare(DataLoader(eval_dataset, **dataloader_params)) ref_chosen_logps = [] ref_rejected_logps = [] for padded_batch in tqdm(iterable=data_loader, desc="Eval dataset reference log probs"): ref_chosen_logp, ref_rejected_logp = self.compute_ref_log_probs(padded_batch) ref_chosen_logp, ref_rejected_logp = self.accelerator.gather_for_metrics( (ref_chosen_logp, ref_rejected_logp) ) ref_chosen_logps.append(ref_chosen_logp.cpu()) ref_rejected_logps.append(ref_rejected_logp.cpu()) all_ref_chosen_logps = torch.cat(ref_chosen_logps).float().numpy() all_ref_rejected_logps = torch.cat(ref_rejected_logps).float().numpy() eval_dataset = eval_dataset.add_column(name="ref_chosen_logps", column=all_ref_chosen_logps) eval_dataset = eval_dataset.add_column(name="ref_rejected_logps", column=all_ref_rejected_logps) # Save calculated ref_chosen_logps and ref_rejected_logps to the eval_dataset for subsequent runs if self.eval_dataset is not None: self.eval_dataset = eval_dataset self._precomputed_eval_ref_log_probs = True return super().get_eval_dataloader(eval_dataset=eval_dataset) @contextmanager def null_ref_context(self): """Context manager for handling null reference model (that is, peft adapter manipulation).""" with ( self.accelerator.unwrap_model(self.model).disable_adapter() if self.is_peft_model and not self.ref_adapter_name else nullcontext() ): if self.ref_adapter_name: self.model.set_adapter(self.ref_adapter_name) yield if self.ref_adapter_name: self.model.set_adapter(self.model_adapter_name or "default") def compute_ref_log_probs(self, batch: dict[str, torch.LongTensor]) -> tuple[torch.Tensor, torch.Tensor]: """Computes log probabilities of the reference model for a single padded batch of a DPO specific dataset.""" compte_ref_context_manager = ( autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() ) with torch.no_grad(), compte_ref_context_manager: if self.ref_model is None: with self.null_ref_context(): ref_model_output = self.concatenated_forward(self.model, batch, is_ref_model=True) else: ref_model_output = self.concatenated_forward(self.ref_model, batch, is_ref_model=True) return ref_model_output["chosen_logps"], ref_model_output["rejected_logps"] @staticmethod def concatenated_inputs( batch: dict[str, Union[list, torch.LongTensor]], padding_value: int ) -> dict[str, torch.LongTensor]: """ Concatenate the `chosen` and `rejected` inputs from the batch into a single tensor for both the prompt and completion sequences. Args: batch (`dict[str, Union[list, torch.LongTensor]]`): A batch of input data. The batch must contain the following keys: - `"prompt_input_ids"`: Tensor of shape `(batch_size, prompt_length)` representing the prompt input IDs. - `"chosen_input_ids"`: Tensor of shape `(batch_size, chosen_length)` representing the chosen completion input IDs. - `"rejected_input_ids"`: Tensor of shape `(batch_size, rejected_length)` representing the rejected completion input IDs. - `"prompt_pixel_values"` (optional): Tensor for pixel values, if available. - `"prompt_pixel_attention_mask"` (optional): Tensor for pixel attention masks, if available. padding_value (`int`): The padding value to use for the concatenated completion sequences (`chosen_input_ids` and `rejected_input_ids`). Returns: `dict[str, torch.LongTensor]`: A dictionary containing: - `"prompt_input_ids"`: Concatenated prompt input IDs of shape `(2 * batch_size, prompt_length)`. - `"completion_input_ids"`: Concatenated chosen and rejected completion input IDs of shape `(2 * batch_size, max_completion_length)`. - `"prompt_attention_mask"`: Concatenated prompt attention masks of shape `(2 * batch_size, prompt_length)`. - `"completion_attention_mask"`: Concatenated chosen and rejected attention masks of shape `(2 * batch_size, max_completion_length)`. - `"pixel_values"` (optional): Concatenated pixel values if `"prompt_pixel_values"` are present. - `"pixel_attention_mask"` (optional): Concatenated pixel attention masks if `"prompt_pixel_attention_mask"` are present. Notes: The completion input IDs and attention masks are padded to the maximum completion length of the chosen or rejected sequences. """ output = {} # For the prompt, the input_ids are the same for both the chosen and rejected responses output["prompt_input_ids"] = torch.cat([batch["prompt_input_ids"], batch["prompt_input_ids"]], dim=0) output["prompt_attention_mask"] = torch.cat( [batch["prompt_attention_mask"], batch["prompt_attention_mask"]], dim=0 ) if "pixel_values" in batch: output["pixel_values"] = torch.cat([batch["pixel_values"], batch["pixel_values"]], dim=0) if "pixel_attention_mask" in batch: output["pixel_attention_mask"] = torch.cat( [batch["pixel_attention_mask"], batch["pixel_attention_mask"]], dim=0 ) if "image_sizes" in batch: output["image_sizes"] = torch.cat([batch["image_sizes"], batch["image_sizes"]], dim=0) # Concatenate the chosen and rejected completions max_completion_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1]) output["completion_input_ids"] = torch.cat( ( pad_to_length(batch["chosen_input_ids"], max_completion_length, pad_value=padding_value), pad_to_length(batch["rejected_input_ids"], max_completion_length, pad_value=padding_value), ), ) output["completion_attention_mask"] = torch.cat( ( pad_to_length(batch["chosen_attention_mask"], max_completion_length, pad_value=0), pad_to_length(batch["rejected_attention_mask"], max_completion_length, pad_value=0), ), ) return output def dpo_loss( self, chosen_logps: torch.FloatTensor, rejected_logps: torch.FloatTensor, ref_chosen_logps: torch.FloatTensor, ref_rejected_logps: torch.FloatTensor, loss_type: str = "sigmoid", model_output: dict[str, torch.FloatTensor] = None, ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: """ Compute the DPO loss for a batch of policy and reference model log probabilities. Args: chosen_logps (`torch.FloatTensor`): Log probabilities of the model for the chosen responses. Shape: `(batch_size,)`. rejected_logps (`torch.FloatTensor`): Log probabilities of the model for the rejected responses. Shape: `(batch_size,)`. ref_chosen_logps (`torch.FloatTensor`): Log probabilities of the reference model for the chosen responses. Shape: `(batch_size,)`. ref_rejected_logps (`torch.FloatTensor`): Log probabilities of the reference model for the rejected responses. Shape: `(batch_size,)`. Returns: A tuple of three tensors: `(losses, chosen_rewards, rejected_rewards)`. The losses tensor contains the DPO loss for each example in the batch. The `chosen_rewards` and `rejected_rewards` tensors contain the rewards for the chosen and rejected responses, respectively. """ device = self.accelerator.device # Get the log ratios for the chosen and rejected responses chosen_logratios = chosen_logps.to(device) - (not self.reference_free) * ref_chosen_logps.to(device) rejected_logratios = rejected_logps.to(device) - (not self.reference_free) * ref_rejected_logps.to(device) if self.f_divergence_type == FDivergenceType.ALPHA_DIVERGENCE.value: # The alpha-divergence formula: (1 - u^-alpha) / alpha # The divergence difference between the chosen and rejected sample is: # (1 - u[w]^-alpha) / alpha - (1 - u[l]^-alpha) / alpha # = (u[l]^-alpha - u[w]^-alpha) / alpha # where u[w] and u[l] are the policy/reference probability ratios # for the chosen and rejected samples, respectively. alpha_coef = FDivergenceConstants.ALPHA_DIVERGENCE_COEF_DEFAULT if self.f_divergence_params and FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY in self.f_divergence_params: alpha_coef = float(self.f_divergence_params[FDivergenceConstants.ALPHA_DIVERGENCE_COEF_KEY]) logits = (cap_exp(rejected_logratios * -alpha_coef) - cap_exp(chosen_logratios * -alpha_coef)) / alpha_coef else: logratios = chosen_logps - rejected_logps if self.reference_free: ref_logratios = torch.tensor([0], dtype=logratios.dtype, device=logratios.device) else: ref_logratios = ref_chosen_logps - ref_rejected_logps logratios = logratios.to(self.accelerator.device) ref_logratios = ref_logratios.to(self.accelerator.device) logits = logratios - ref_logratios if self.f_divergence_type == FDivergenceType.JS_DIVERGENCE.value: # The js-divergence formula: log(2 * u / (1 + u)) # The divergence difference between the chosen and rejected sample is: # log(2 * u[w] / (1 + u[w])) - log(2 * u[l] / (1 + u[l])) # = log(u[w]) - log(u[l]) - (log(1 + u[w]) - log(1 + u[l])) # where u[w] and u[l] are the policy/reference probability ratios # for the chosen and rejected samples, respectively. logits -= F.softplus(chosen_logratios) - F.softplus(rejected_logratios) # The beta is a temperature parameter for the DPO loss, typically something in the range of 0.1 to 0.5. # We ignore the reference model as beta -> 0. The label_smoothing parameter encodes our uncertainty about the # labels and calculates a conservative DPO loss. if loss_type == "sigmoid": losses = ( -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * logits) * self.label_smoothing ) elif loss_type == "robust": losses = ( -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing) + F.logsigmoid(-self.beta * logits) * self.label_smoothing ) / (1 - 2 * self.label_smoothing) elif loss_type == "exo_pair": # eqn (16) of the EXO paper: https://huggingface.co/papers/2402.00856 import math if self.label_smoothing == 0: self.label_smoothing = 1e-3 losses = (self.beta * logits).sigmoid() * ( F.logsigmoid(self.beta * logits) - math.log(1 - self.label_smoothing) ) + (-self.beta * logits).sigmoid() * (F.logsigmoid(-self.beta * logits) - math.log(self.label_smoothing)) elif loss_type == "hinge": losses = torch.relu(1 - self.beta * logits) elif loss_type == "ipo": # eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper. losses = (logits - 1 / (2 * self.beta)) ** 2 elif loss_type == "bco_pair": chosen_logratios = chosen_logps - ref_chosen_logps rejected_logratios = rejected_logps - ref_rejected_logps chosen_rewards = self.beta * chosen_logratios rejected_rewards = self.beta * rejected_logratios rewards = torch.cat((chosen_rewards, rejected_rewards), 0).mean().detach() self.running.update(rewards) delta = self.running.mean losses = -F.logsigmoid((self.beta * chosen_logratios) - delta) - F.logsigmoid( -(self.beta * rejected_logratios - delta) ) elif loss_type == "sppo_hard": # In the paper (https://huggingface.co/papers/2405.00675), SPPO employs a soft probability approach, # estimated using the PairRM score. The probability calculation is conducted outside of the trainer class. # The version described here is the hard probability version, where P in Equation (4.7) of Algorithm 1 is # set to 1 for the winner and 0 for the loser. a = chosen_logps - ref_chosen_logps b = rejected_logps - ref_rejected_logps losses = (a - 0.5 / self.beta) ** 2 + (b + 0.5 / self.beta) ** 2 elif loss_type == "nca_pair": chosen_rewards = (chosen_logps - ref_chosen_logps) * self.beta rejected_rewards = (rejected_logps - ref_rejected_logps) * self.beta losses = ( -F.logsigmoid(chosen_rewards) - 0.5 * F.logsigmoid(-chosen_rewards) - 0.5 * F.logsigmoid(-rejected_rewards) ) elif loss_type == "aot_pair": chosen_logratios = chosen_logps - ref_chosen_logps rejected_logratios = rejected_logps - ref_rejected_logps chosen_logratios_sorted, _ = torch.sort(chosen_logratios, dim=0) rejected_logratios_sorted, _ = torch.sort(rejected_logratios, dim=0) delta = chosen_logratios_sorted - rejected_logratios_sorted losses = ( -F.logsigmoid(self.beta * delta) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * delta) * self.label_smoothing ) elif loss_type == "aot": logratios = chosen_logps - rejected_logps ref_logratios = ref_chosen_logps - ref_rejected_logps logratios_sorted, _ = torch.sort(logratios, dim=0) ref_logratios_sorted, _ = torch.sort(ref_logratios, dim=0) delta = logratios_sorted - ref_logratios_sorted losses = ( -F.logsigmoid(self.beta * delta) * (1 - self.label_smoothing) - F.logsigmoid(-self.beta * delta) * self.label_smoothing ) elif loss_type == "apo_zero": # Eqn (7) of the APO paper (https://huggingface.co/papers/2408.06266) # Use this loss when you believe the chosen outputs are better than your model's default output losses_chosen = 1 - F.sigmoid(self.beta * chosen_logratios) # Increase chosen likelihood losses_rejected = F.sigmoid(self.beta * rejected_logratios) # Decrease rejected likelihood losses = losses_chosen + losses_rejected elif loss_type == "apo_down": # Eqn (8) of the APO paper (https://huggingface.co/papers/2408.06266) # Use this loss when you believe the chosen outputs are worse than your model's default output. # Decrease chosen likelihood and decrease rejected likelihood more losses_chosen = F.sigmoid(self.beta * chosen_logratios) losses_rejected = 1 - F.sigmoid(self.beta * (chosen_logratios - rejected_logratios)) losses = losses_chosen + losses_rejected elif loss_type == "discopop": # Eqn (5) of the DiscoPOP paper (https://huggingface.co/papers/2406.08414) # This loss was discovered with LLM discovery logratios = chosen_logps - rejected_logps ref_logratios = ref_chosen_logps - ref_rejected_logps logits = logratios - ref_logratios logits = logits * self.beta # Modulate the mixing coefficient based on the log ratio magnitudes log_ratio_modulation = torch.sigmoid(logits / self.args.discopop_tau) logistic_component = -F.logsigmoid(logits) exp_component = torch.exp(-logits) # Blend between logistic and exponential component based on log ratio modulation losses = logistic_component * (1 - log_ratio_modulation) + exp_component * log_ratio_modulation elif loss_type == "sft": # SFT loss is the negative log likelihood loss on chosen responses # This acts as the generation loss component in MPO sft_loss = model_output["nll_loss"] # Create losses tensor with same shape as other losses (per-sample) batch_size = chosen_logps.shape[0] losses = sft_loss.expand(batch_size) # For SFT, we don't have preference rewards, so use zeros chosen_rewards = torch.zeros_like(chosen_logps) rejected_rewards = torch.zeros_like(rejected_logps) else: raise ValueError( f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'exo_pair', " "'nca_pair', 'robust', 'bco_pair', 'sppo_hard', 'aot', 'aot_pair', 'discopop', 'apo_zero', " "'apo_down', 'sft']" ) chosen_rewards = self.beta * (chosen_logps.to(device) - ref_chosen_logps.to(device)).detach() rejected_rewards = self.beta * (rejected_logps.to(device) - ref_rejected_logps.to(device)).detach() return losses, chosen_rewards, rejected_rewards def _compute_loss_liger( self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]] ) -> dict[str, torch.Tensor]: unwrapped_model = self.accelerator.unwrap_model(model) concatenated_batch = self.concatenated_inputs(batch, padding_value=self.padding_value) model_kwargs = {} if self.aux_loss_enabled: model_kwargs["output_router_logits"] = True # Add the pixel values and attention masks for vision models if "pixel_values" in concatenated_batch: model_kwargs["pixel_values"] = concatenated_batch["pixel_values"] if "pixel_attention_mask" in concatenated_batch: model_kwargs["pixel_attention_mask"] = concatenated_batch["pixel_attention_mask"] if "image_sizes" in concatenated_batch: model_kwargs["image_sizes"] = concatenated_batch["image_sizes"] prompt_attention_mask = concatenated_batch["prompt_attention_mask"] completion_attention_mask = concatenated_batch["completion_attention_mask"] if self.is_encoder_decoder: # 1. Get encoder outputs encoder_outputs = unwrapped_model.get_encoder()( concatenated_batch["prompt_input_ids"], attention_mask=concatenated_batch["prompt_attention_mask"], return_dict=True, ) # 2. Prepare decoder inputs decoder_input_ids = shift_tokens_right( concatenated_batch["completion_input_ids"], unwrapped_model.config.decoder_start_token_id, ) # 3. Get decoder outputs decoder_outputs = unwrapped_model.get_decoder()( input_ids=decoder_input_ids, attention_mask=concatenated_batch["completion_attention_mask"], encoder_hidden_states=encoder_outputs.last_hidden_state, encoder_attention_mask=concatenated_batch["prompt_attention_mask"], use_cache=False, ) hidden_states = decoder_outputs.last_hidden_state ref_hidden_states = None if not self.reference_free and self.ref_model is not None: unwrapped_ref_model = self.accelerator.unwrap_model(self.ref_model) ref_encoder_outputs = unwrapped_ref_model.get_encoder()( concatenated_batch["prompt_input_ids"], attention_mask=concatenated_batch["prompt_attention_mask"], return_dict=True, ) ref_decoder_outputs = unwrapped_ref_model.get_decoder()( input_ids=decoder_input_ids, attention_mask=concatenated_batch["completion_attention_mask"], encoder_hidden_states=ref_encoder_outputs.last_hidden_state, encoder_attention_mask=concatenated_batch["prompt_attention_mask"], use_cache=False, ) ref_hidden_states = ref_decoder_outputs.last_hidden_state elif not self.reference_free: with self.null_ref_context(): ref_encoder_outputs = unwrapped_model.get_encoder()( concatenated_batch["prompt_input_ids"], attention_mask=concatenated_batch["prompt_attention_mask"], return_dict=True, ) ref_decoder_outputs = unwrapped_model.get_decoder()( input_ids=decoder_input_ids, attention_mask=concatenated_batch["completion_attention_mask"], encoder_hidden_states=ref_encoder_outputs.last_hidden_state, encoder_attention_mask=concatenated_batch["prompt_attention_mask"], use_cache=False, ) ref_hidden_states = ref_decoder_outputs.last_hidden_state labels = concatenated_batch["completion_input_ids"] loss_mask = completion_attention_mask.bool() else: # For decoder-only models input_ids = torch.cat( (concatenated_batch["prompt_input_ids"], concatenated_batch["completion_input_ids"]), dim=1 ) attention_mask = torch.cat( (concatenated_batch["prompt_attention_mask"], concatenated_batch["completion_attention_mask"]), dim=1, ) # Mask the prompt but not the completion for the loss loss_mask = torch.cat( (torch.zeros_like(prompt_attention_mask), completion_attention_mask), dim=1, ) # Flush and truncate if self.max_length is not None and self.max_length < attention_mask.size(1): if self.truncation_mode == "keep_start": # Flush left to reduce the memory usage # [[0, 0, x, x, x, x], -> [[x, x, x, x], # [0, x, x, x, 0, 0]] [x, x, x, 0]] attention_mask, input_ids, loss_mask = flush_left(attention_mask, input_ids, loss_mask) attention_mask = attention_mask[:, : self.max_length] input_ids = input_ids[:, : self.max_length] loss_mask = loss_mask[:, : self.max_length] elif self.truncation_mode == "keep_end": # Flush right before truncating left, then flush left # [[0, 0, x, x, x, x], -> [[0, 0, x, x], # [0, x, x, x, 0, 0]] [0, x, x, x]] attention_mask, input_ids, loss_mask = flush_right(attention_mask, input_ids, loss_mask) input_ids = input_ids[:, -self.max_length :] attention_mask = attention_mask[:, -self.max_length :] loss_mask = loss_mask[:, -self.max_length :] attention_mask, input_ids, loss_mask = flush_left(attention_mask, input_ids, loss_mask) else: raise ValueError( f"Unknown truncation mode: '{self.truncation_mode}'. Should be one of ['keep_end', " "'keep_start']." ) else: # Flush left to reduce the memory usage # [[0, 0, x, x, x, x], -> [[x, x, x, x], # [0, x, x, x, 0, 0]] [x, x, x, 0]] attention_mask, input_ids, loss_mask = flush_left(attention_mask, input_ids, loss_mask) # Add logits_to_keep optimization if self.use_logits_to_keep: first_compute_index = loss_mask.nonzero(as_tuple=True)[1].min() logits_to_keep = (loss_mask.shape[1] - first_compute_index).item() + 1 model_kwargs["logits_to_keep"] = logits_to_keep model_kwargs["output_hidden_states"] = True # Add padding-free training support if self.padding_free: input_ids = input_ids[attention_mask.bool()].unsqueeze(0) loss_mask = loss_mask[attention_mask.bool()].unsqueeze(0) position_ids = attention_mask.cumsum(1)[attention_mask.bool()].unsqueeze(0) - 1 model_kwargs["position_ids"] = position_ids else: model_kwargs["attention_mask"] = attention_mask # Get the base model outputs (before LM head) if hasattr(unwrapped_model, "get_decoder"): base_model = unwrapped_model.get_decoder() else: base_model = getattr(unwrapped_model, self.args.base_model_attribute_name, unwrapped_model) outputs = base_model( input_ids, use_cache=False, **model_kwargs, ) hidden_states = outputs.last_hidden_state[:, :-1] # Get reference hidden states if needed ref_hidden_states = None if not self.reference_free and self.ref_model is not None: unwrapped_ref_model = self.accelerator.unwrap_model(self.ref_model) if hasattr(unwrapped_ref_model, "get_decoder"): ref_base_model = unwrapped_ref_model.get_decoder() else: ref_base_model = getattr( unwrapped_ref_model, self.args.base_model_attribute_name, unwrapped_ref_model ) ref_outputs = ref_base_model( input_ids, use_cache=False, **model_kwargs, ) ref_hidden_states = ref_outputs.last_hidden_state[:, :-1] elif not self.reference_free: if hasattr(unwrapped_model, "get_decoder"): ref_base_model = unwrapped_model.get_decoder() else: ref_base_model = getattr(unwrapped_model, self.args.base_model_attribute_name, unwrapped_model) with self.null_ref_context(): ref_outputs = ref_base_model( input_ids, use_cache=False, **model_kwargs, ) ref_hidden_states = ref_outputs.last_hidden_state[:, :-1] masked_input_ids = torch.where(loss_mask != 0, input_ids, self.label_pad_token_id) labels = masked_input_ids[:, 1:] # Shift right for casual LM # Get the LM head lm_head = unwrapped_model.get_output_embeddings() # Get reference model weights if needed ref_weight = None ref_bias = None if not self.reference_free: if self.ref_model is not None: unwrapped_ref_model = self.accelerator.unwrap_model(self.ref_model) ref_lm_head = unwrapped_ref_model.get_output_embeddings() else: with self.null_ref_context(): ref_lm_head = unwrapped_model.get_output_embeddings() ref_weight = ref_lm_head.weight ref_bias = ref_lm_head.bias if hasattr(ref_lm_head, "bias") else None # Compute loss using Liger kernel loss_output = self.dpo_loss_fn( lm_head.weight, hidden_states, labels, bias=lm_head.bias if hasattr(lm_head, "bias") else None, ref_input=ref_hidden_states if not self.reference_free else None, ref_weight=ref_weight if not self.reference_free else None, ref_bias=ref_bias if not self.reference_free else None, ) ( loss, (chosen_logps, rejected_logps, chosen_logits_mean, rejected_logits_mean, nll_loss, *aux_outputs), ) = loss_output output = { "loss": loss, "chosen_logps": chosen_logps, "rejected_logps": rejected_logps, "mean_chosen_logits": chosen_logits_mean, "mean_rejected_logits": rejected_logits_mean, "nll_loss": nll_loss, "chosen_rewards": aux_outputs[0], "rejected_rewards": aux_outputs[1], } if self.aux_loss_enabled: output["aux_loss"] = outputs.aux_loss return output def concatenated_forward( self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]], is_ref_model: bool = False ) -> dict[str, torch.Tensor]: """ Runs the given model on the given batch of inputs, concatenating the chosen and rejected inputs together. We do this to avoid doing two forward passes, because it's faster for FSDP. Args: model: Model to run the forward pass on. batch: Batch of input data. is_ref_model: Whether this method is being called for the reference model. If `True`, length desensitization is not applied. """ num_examples = batch["prompt_input_ids"].shape[0] concatenated_batch = self.concatenated_inputs(batch, padding_value=self.padding_value) model_kwargs = {"use_cache": False} if self.aux_loss_enabled: model_kwargs["output_router_logits"] = True # Add the pixel values and attention masks for vision models if "pixel_values" in concatenated_batch: model_kwargs["pixel_values"] = concatenated_batch["pixel_values"] if "pixel_attention_mask" in concatenated_batch: model_kwargs["pixel_attention_mask"] = concatenated_batch["pixel_attention_mask"] if "image_sizes" in concatenated_batch: model_kwargs["image_sizes"] = concatenated_batch["image_sizes"] prompt_input_ids = concatenated_batch["prompt_input_ids"] prompt_attention_mask = concatenated_batch["prompt_attention_mask"] completion_input_ids = concatenated_batch["completion_input_ids"] completion_attention_mask = concatenated_batch["completion_attention_mask"] if self.is_encoder_decoder: labels = completion_input_ids labels[completion_attention_mask == 0] = self.label_pad_token_id outputs = model( input_ids=prompt_input_ids, attention_mask=prompt_attention_mask, labels=labels, # we need the labels for the logits to be returned **model_kwargs, ) logits = outputs.logits loss_mask = completion_attention_mask.bool() else: # Concatenate the prompt and completion inputs input_ids = torch.cat((prompt_input_ids, completion_input_ids), dim=1) attention_mask = torch.cat((prompt_attention_mask, completion_attention_mask), dim=1) # Mask the prompt but not the completion for the loss loss_mask = torch.cat( (torch.zeros_like(prompt_attention_mask), completion_attention_mask), dim=1, ) # Flush and truncate if self.max_length is not None and self.max_length < attention_mask.size(1): if self.truncation_mode == "keep_start": # Flush left to reduce the memory usage # [[0, 0, x, x, x, x], -> [[x, x, x, x], # [0, x, x, x, 0, 0]] [x, x, x, 0]] attention_mask, input_ids, loss_mask = flush_left(attention_mask, input_ids, loss_mask) attention_mask = attention_mask[:, : self.max_length] input_ids = input_ids[:, : self.max_length] loss_mask = loss_mask[:, : self.max_length] elif self.truncation_mode == "keep_end": # Flush right before truncating left, then flush left # [[0, 0, x, x, x, x], -> [[0, 0, x, x], # [0, x, x, x, 0, 0]] [0, x, x, x]] attention_mask, input_ids, loss_mask = flush_right(attention_mask, input_ids, loss_mask) input_ids = input_ids[:, -self.max_length :] attention_mask = attention_mask[:, -self.max_length :] loss_mask = loss_mask[:, -self.max_length :] attention_mask, input_ids, loss_mask = flush_left(attention_mask, input_ids, loss_mask) else: raise ValueError( f"Unknown truncation mode: '{self.truncation_mode}'. Should be one of ['keep_end', " "'keep_start']." ) else: # Flush left to reduce the memory usage # [[0, 0, x, x, x, x], -> [[x, x, x, x], # [0, x, x, x, 0, 0]] [x, x, x, 0]] attention_mask, input_ids, loss_mask = flush_left(attention_mask, input_ids, loss_mask) if self.use_logits_to_keep: # Compute logits_to_keep based on loss_mask pattern: # [[0, 0, 0, x, x, x, x], # [0, 0, 0, x, x, x, 0]] # ^ start computing logits from here ([:, -(7-3+1):]) first_compute_index = loss_mask.nonzero(as_tuple=True)[1].min() logits_to_keep = (loss_mask.shape[1] - first_compute_index).item() + 1 # +1 for the first label model_kwargs["logits_to_keep"] = logits_to_keep model_kwargs["output_hidden_states"] = True if self.padding_free: # Flatten the input_ids, position_ids, and loss_mask # input_ids = [[a, b, c, 0], -> input_ids = [[a, b, c, d, e, f, g]] # [d, e, f, g]] position_ids = [[0, 1, 2, 0, 1, 2, 3]] input_ids = input_ids[attention_mask.bool()].unsqueeze(0) loss_mask = loss_mask[attention_mask.bool()].unsqueeze(0) position_ids = attention_mask.cumsum(1)[attention_mask.bool()].unsqueeze(0) - 1 model_kwargs["position_ids"] = position_ids else: model_kwargs["attention_mask"] = attention_mask outputs = model(input_ids, **model_kwargs) logits = outputs.logits # Offset the logits by one to align with the labels labels = torch.roll(input_ids, shifts=-1, dims=1) loss_mask = torch.roll(loss_mask, shifts=-1, dims=1).bool() if self.use_logits_to_keep: # Align labels with logits # logits: -, -, [x2, x3, x4, x5, x6] # ^ --------- ^ after logits[:, :-1, :] # labels: [y0, y1, y2, y3, y4, y5, y6] # ^ --------- ^ with logits_to_keep=4, [:, -4:] # loss_mask: [0, 0, 0, 1, 1, 1, 1] labels = labels[:, -logits_to_keep:] loss_mask = loss_mask[:, -logits_to_keep:] if logits.shape[:2] != labels.shape[:2]: # for LLaVA, the returned logits include the image tokens (placed before the text tokens) seq_len = labels.shape[1] logits = logits[:, -seq_len:] # Compute the log probabilities of the labels labels[~loss_mask] = 0 # dummy token; we'll ignore the losses on these tokens later per_token_logps = selective_log_softmax(logits, labels) per_token_logps[~loss_mask] = 0 per_token_logps = torch.roll(per_token_logps, shifts=1, dims=1) if self.padding_free: # Unflatten the per_token_logps (shape: [1, sum_seq_len] -> [batch_size, seq_len]) batch_size, seq_len = attention_mask.shape per_token_logps_ = torch.zeros( batch_size, seq_len, device=outputs.logits.device, dtype=outputs.logits.dtype ) per_token_logps_[attention_mask.bool()] = per_token_logps per_token_logps = per_token_logps_ all_logps = per_token_logps[:, 1:].sum(-1) output = {} if self.use_weighting: with torch.no_grad(): # Eq (2) of the WPO paper: https://huggingface.co/papers/2406.11827 logprobs = F.log_softmax(logits, dim=-1) weights_adjustment_factor = torch.logsumexp(2 * logprobs, dim=-1) # same as sum(probs**2) in log space per_token_logps_adjusted = per_token_logps - weights_adjustment_factor all_weights = (per_token_logps_adjusted * loss_mask).sum(-1) / loss_mask.sum(-1) chosen_weights = all_weights[:num_examples] rejected_weights = all_weights[num_examples:] output["policy_weights"] = torch.clamp(torch.exp(chosen_weights + rejected_weights), max=1) if self.args.rpo_alpha is not None or "sft" in self.loss_type: # Only use the chosen logits for the RPO loss or SFT loss chosen_logits = logits[:num_examples, :-1] if not self.is_encoder_decoder else logits[:num_examples] chosen_labels = labels[:num_examples, :-1] if not self.is_encoder_decoder else labels[:num_examples] # Compute the log probabilities of the labels output["nll_loss"] = F.cross_entropy( torch.flatten(chosen_logits, end_dim=1), torch.flatten(chosen_labels, end_dim=1), ignore_index=0 ) if "ipo" in self.loss_type: all_logps = all_logps / loss_mask.sum(-1) if self.args.ld_alpha is not None and not is_ref_model: # Compute response lengths based on loss_mask completion_lengths = loss_mask.sum(dim=1) chosen_lengths = completion_lengths[:num_examples] rejected_lengths = completion_lengths[num_examples:] public_lengths = torch.min(chosen_lengths, rejected_lengths) # l_p in the paper public_lengths = torch.cat([public_lengths, public_lengths], dim=0) seq_len = per_token_logps.size(1) position_ids = torch.arange(seq_len, device=per_token_logps.device).expand_as(per_token_logps) ld_mask = position_ids < public_lengths.unsqueeze(1) mask = position_ids < completion_lengths.unsqueeze(1) front_mask = (ld_mask & mask).float() rear_mask = (~ld_mask & mask).float() front_logps = (per_token_logps * front_mask).sum(dim=1) rear_logps = (per_token_logps * rear_mask).sum(dim=1) all_logps = front_logps + self.args.ld_alpha * rear_logps output["chosen_logps"] = all_logps[:num_examples] output["rejected_logps"] = all_logps[num_examples:] # Compute the mean logits if self.padding_free: # position_ids contains a sequence of range identifiers (e.g., [[0, 1, 2, 0, 1, 2, 3, ...]]). # There are 2*num_examples ranges in total: the first half corresponds to the chosen tokens, # and the second half to the rejected tokens. # To find the start of the rejected tokens, we look for the num_examples+1-th zero in pos_id. split_idx = (position_ids == 0).nonzero(as_tuple=True)[1][num_examples] mean_chosen_logits = logits[0, :split_idx][loss_mask[0, :split_idx]].mean() mean_rejected_logits = logits[0, split_idx:][loss_mask[0, split_idx:]].mean() else: mean_chosen_logits = logits[:num_examples][loss_mask[:num_examples]].mean() mean_rejected_logits = logits[num_examples:][loss_mask[num_examples:]].mean() output["mean_chosen_logits"] = mean_chosen_logits output["mean_rejected_logits"] = mean_rejected_logits if self.aux_loss_enabled: output["aux_loss"] = outputs.aux_loss return output def get_batch_loss_metrics( self, model: Union[PreTrainedModel, nn.Module], batch: dict[str, Union[list, torch.LongTensor]], train_eval: Literal["train", "eval"] = "train", ) -> tuple[torch.Tensor, dict[str, float]]: """Compute the DPO loss and other metrics for the given batch of inputs for train or test.""" metrics = {} if self.args.use_liger_loss: model_output = self._compute_loss_liger(model, batch) losses = model_output["loss"] chosen_rewards = model_output["chosen_rewards"] rejected_rewards = model_output["rejected_rewards"] else: model_output = self.concatenated_forward(model, batch) # if ref_chosen_logps and ref_rejected_logps in batch use them, otherwise use the reference model if "ref_chosen_logps" in batch and "ref_rejected_logps" in batch: ref_chosen_logps = batch["ref_chosen_logps"] ref_rejected_logps = batch["ref_rejected_logps"] else: ref_chosen_logps, ref_rejected_logps = self.compute_ref_log_probs(batch) # Initialize combined losses losses = 0 chosen_rewards = 0 rejected_rewards = 0 # Compute losses for each loss type for idx, loss_type in enumerate(self.loss_type): # Compute individual loss using standard DPO loss function _losses, _chosen_rewards, _rejected_rewards = self.dpo_loss( model_output["chosen_logps"], model_output["rejected_logps"], ref_chosen_logps, ref_rejected_logps, loss_type, model_output, ) # Add weighted contributions weight = self.loss_weights[idx] if self.loss_weights else 1.0 losses = losses + _losses * weight chosen_rewards = chosen_rewards + _chosen_rewards * weight rejected_rewards = rejected_rewards + _rejected_rewards * weight reward_accuracies = (chosen_rewards > rejected_rewards).float() if self.args.rpo_alpha is not None: losses = losses + self.args.rpo_alpha * model_output["nll_loss"] # RPO loss from V3 of the paper if self.use_weighting: losses = losses * model_output["policy_weights"] if self.aux_loss_enabled: losses = losses + self.aux_loss_coef * model_output["aux_loss"] prefix = "eval_" if train_eval == "eval" else "" metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean().item() metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean().item() metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean().item() metrics[f"{prefix}rewards/margins"] = ( self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean().item() ) metrics[f"{prefix}logps/chosen"] = ( self.accelerator.gather_for_metrics(model_output["chosen_logps"]).detach().mean().item() ) metrics[f"{prefix}logps/rejected"] = ( self.accelerator.gather_for_metrics(model_output["rejected_logps"]).detach().mean().item() ) metrics[f"{prefix}logits/chosen"] = ( self.accelerator.gather_for_metrics(model_output["mean_chosen_logits"]).detach().mean().item() ) metrics[f"{prefix}logits/rejected"] = ( self.accelerator.gather_for_metrics(model_output["mean_rejected_logits"]).detach().mean().item() ) if self.args.rpo_alpha is not None or "sft" in self.loss_type: metrics[f"{prefix}nll_loss"] = ( self.accelerator.gather_for_metrics(model_output["nll_loss"]).detach().mean().item() ) if self.aux_loss_enabled: metrics[f"{prefix}aux_loss"] = ( self.accelerator.gather_for_metrics(model_output["aux_loss"]).detach().mean().item() ) return losses.mean(), metrics def compute_loss( self, model: Union[PreTrainedModel, nn.Module], inputs: dict[str, Union[torch.Tensor, Any]], return_outputs=False, num_items_in_batch=None, ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, float]]]: compute_loss_context_manager = ( autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() ) with compute_loss_context_manager: loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train") # Make sure to move the loss to the device the original accumulating loss is at back in the `Trainer` class: loss = loss.to(self.args.device) # force log the metrics self.store_metrics(metrics, train_eval="train") if return_outputs: return loss, metrics return loss def generate_from_model_and_ref(self, model, batch: dict[str, torch.LongTensor]) -> tuple[str, str]: """Generate samples from the model and reference model for the given batch of inputs.""" # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with # the torch amp context manager as some hidden states are silently casted to full precision. generate_context_manager = ( autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() ) with generate_context_manager: policy_output = model.generate( input_ids=batch["prompt_input_ids"], attention_mask=batch["prompt_attention_mask"], max_length=self.max_length, do_sample=True, pad_token_id=self.padding_value, ) # if ref_output in batch use that otherwise use the reference model if "ref_output" in batch: ref_output = batch["ref_output"] else: if self.ref_model is None: with self.null_ref_context(): ref_output = self.model.generate( input_ids=batch["prompt_input_ids"], attention_mask=batch["prompt_attention_mask"], max_length=self.max_length, do_sample=True, pad_token_id=self.padding_value, ) else: ref_output = self.ref_model.generate( input_ids=batch["prompt_input_ids"], attention_mask=batch["prompt_attention_mask"], max_length=self.max_length, do_sample=True, pad_token_id=self.padding_value, ) policy_output = pad_to_length(policy_output, self.max_length, self.padding_value) policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True) ref_output = pad_to_length(ref_output, self.max_length, self.padding_value) ref_output_decoded = self.processing_class.batch_decode(ref_output, skip_special_tokens=True) return policy_output_decoded, ref_output_decoded def prediction_step( self, model: Union[PreTrainedModel, nn.Module], inputs: dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[list[str]] = None, ) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[torch.Tensor]]: if ignore_keys is None: if hasattr(model, "config"): ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", []) else: ignore_keys = [] prediction_context_manager = ( autocast(self.accelerator.device.type) if self._peft_has_been_casted_to_bf16 else nullcontext() ) with torch.no_grad(), prediction_context_manager: loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval") # force log the metrics self.store_metrics(metrics, train_eval="eval") if prediction_loss_only: return loss.detach(), None, None # logits for the chosen and rejected samples from model logits_dict = { "eval_logits/chosen": metrics["eval_logits/chosen"], "eval_logits/rejected": metrics["eval_logits/rejected"], } logits = [v for k, v in logits_dict.items() if k not in ignore_keys] logits = torch.tensor(logits, device=self.accelerator.device) labels = torch.zeros(logits.shape[0], device=self.accelerator.device) return (loss.detach(), logits, labels) def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None: for key, value in metrics.items(): self._stored_metrics[train_eval][key].append(value) def evaluation_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[list[str]] = None, metric_key_prefix: str = "eval", ) -> EvalLoopOutput: """ Overriding built-in evaluation loop to store metrics for each batch. Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. Works both with or without labels. """ # Sample and save to game log if requested (for one batch to save time) if self.generate_during_eval: # Generate random indices within the range of the total number of samples num_samples = len(dataloader.dataset) random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size) # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader random_batch_dataset = dataloader.dataset.select(random_indices) random_batch = self.data_collator(random_batch_dataset) random_batch = self._prepare_inputs(random_batch) policy_output_decoded, ref_output_decoded = self.generate_from_model_and_ref(self.model, random_batch) table = pd.DataFrame( columns=["Prompt", "Policy", "Ref Model"], data=[ [prompt, pol[len(prompt) :], ref[len(prompt) :]] for prompt, pol, ref in zip( random_batch_dataset["prompt"], policy_output_decoded, ref_output_decoded ) ], ) if "wandb" in self.args.report_to and self.accelerator.is_main_process: wandb.log({"game_log": wandb.Table(data=table)}) if "comet_ml" in self.args.report_to: log_table_to_comet_experiment( name="game_log.csv", table=table, ) if "mlflow" in self.args.report_to and self.accelerator.is_main_process: mlflow.log_table(data=table, artifact_file="game_log.json") # Base evaluation initial_output = super().evaluation_loop( dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix ) return initial_output def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None: """ Log `logs` on the various objects watching training, including stored metrics. Args: logs (`dict[str, float]`): The values to log. start_time (`float` or `None`, *optional*, defaults to `None`): Start time of the training. """ # logs either has 'loss' or 'eval_loss' train_eval = "train" if "loss" in logs else "eval" # Add averaged stored metrics to logs for key, metrics in self._stored_metrics[train_eval].items(): logs[key] = torch.tensor(metrics).mean().item() del self._stored_metrics[train_eval] return super().log(logs, start_time) # Ensure the model card is saved along with the checkpoint def _save_checkpoint(self, model, trial): if self.args.hub_model_id is None: model_name = Path(self.args.output_dir).name else: model_name = self.args.hub_model_id.split("/")[-1] self.create_model_card(model_name=model_name) super()._save_checkpoint(model, trial) def create_model_card( self, model_name: Optional[str] = None, dataset_name: Optional[str] = None, tags: Union[str, list[str], None] = None, ): """ Creates a draft of a model card using the information available to the `Trainer`. Args: model_name (`str` or `None`, *optional*, defaults to `None`): Name of the model. dataset_name (`str` or `None`, *optional*, defaults to `None`): Name of the dataset used for training. tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`): Tags to be associated with the model card. """ if not self.is_world_process_zero(): return if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path): base_model = self.model.config._name_or_path else: base_model = None # normalize `tags` to a mutable set if tags is None: tags = set() elif isinstance(tags, str): tags = {tags} else: tags = set(tags) if hasattr(self.model.config, "unsloth_version"): tags.add("unsloth") tags.update(self._tag_names) # docstyle-ignore citation = textwrap.dedent( """\ @inproceedings{rafailov2023direct, title = {{Direct Preference Optimization: Your Language Model is Secretly a Reward Model}}, author = {Rafael Rafailov and Archit Sharma and Eric Mitchell and Christopher D. Manning and Stefano Ermon and Chelsea Finn}, year = 2023, booktitle = {Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023}, url = {http://papers.nips.cc/paper_files/paper/2023/hash/a85b405ed65c6477a4fe8302b5e06ce7-Abstract-Conference.html}, editor = {Alice Oh and Tristan Naumann and Amir Globerson and Kate Saenko and Moritz Hardt and Sergey Levine}, }""" ) model_card = generate_model_card( base_model=base_model, model_name=model_name, hub_model_id=self.hub_model_id, dataset_name=dataset_name, tags=tags, wandb_url=wandb.run.url if is_wandb_available() and wandb.run is not None else None, comet_url=get_comet_experiment_url(), trainer_name="DPO", trainer_citation=citation, paper_title="Direct Preference Optimization: Your Language Model is Secretly a Reward Model", paper_id="2305.18290", ) model_card.save(os.path.join(self.args.output_dir, "README.md"))
trl/trl/trainer/dpo_trainer.py/0
{ "file_path": "trl/trl/trainer/dpo_trainer.py", "repo_id": "trl", "token_count": 45902 }
557
# Conclusion [[conclusion]] Congratulations on finishing this first Bonus Unit 🥳 You've just **mastered understanding function-calling and how to fine-tune your model to do function-calling**! If we have one piece of advice now, it’s to try to **fine-tune different models**. The **best way to learn is by trying.** In the next Unit, you're going to learn how to use **state-of-the-art frameworks such as `smolagents`, `LlamaIndex` and `LangGraph`**. Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback then, please 👉 [fill this form](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ### Keep Learning, Stay Awesome 🤗
agents-course/units/en/bonus-unit1/conclusion.mdx/0
{ "file_path": "agents-course/units/en/bonus-unit1/conclusion.mdx", "repo_id": "agents-course", "token_count": 246 }
0
# Welcome to the 🤗 AI Agents Course [[introduction]] <figure> <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/thumbnail.jpg" alt="AI Agents Course thumbnail" width="100%"/> <figcaption>The background of the image was generated using <a href="https://scenario.com/">Scenario.com</a> </figcaption> </figure> Welcome to the most exciting topic in AI today: **Agents**! This free course will take you on a journey, **from beginner to expert**, in understanding, using and building AI agents. This first unit will help you onboard: - Discover the **course's syllabus**. - **Choose the path** you're going to take (either self-audit or certification process). - **Get more information about the certification process**. - Get to know the team behind the course. - Create your **Hugging Face account**. - **Sign-up to our Discord server**, and meet your classmates and us. Let's get started! ## What to expect from this course? [[expect]] In this course, you will: - 📖 Study AI Agents in **theory, design, and practice.** - 🧑‍💻 Learn to **use established AI Agent libraries** such as [smolagents](https://huggingface.co/docs/smolagents/en/index), [LlamaIndex](https://www.llamaindex.ai/), and [LangGraph](https://langchain-ai.github.io/langgraph/). - 💾 **Share your agents** on the Hugging Face Hub and explore agents created by the community. - 🏆 Participate in challenges where you will **evaluate your agents against other students'.** - 🎓 **Earn a certificate of completion** by completing assignments. And more! At the end of this course, you'll understand **how Agents work and how to build your own Agents using the latest libraries and tools**. Don't forget to **<a href="https://bit.ly/hf-learn-agents">sign up to the course!</a>** (We are respectful of your privacy. We collect your email address to be able to **send you the links when each Unit is published and give you information about the challenges and updates**). ## What does the course look like? [[course-look-like]] The course is composed of: - *Foundational Units*: where you learn Agents **concepts in theory**. - *Hands-on*: where you'll learn **to use established AI Agent libraries** to train your agents in unique environments. These hands-on sections will be **Hugging Face Spaces** with a pre-configured environment. - *Use case assignments*: where you'll apply the concepts you've learned to solve a real-world problem that you'll choose. - *The Challenge*: you'll get to put your agent to compete against other agents in a challenge. There will also be [a leaderboard](https://huggingface.co/spaces/agents-course/Students_leaderboard) for you to compare the agents' performance. This **course is a living project, evolving with your feedback and contributions!** Feel free to [open issues and PRs in GitHub](https://github.com/huggingface/agents-course), and engage in discussions in our Discord server. After you have gone through the course, you can also send your feedback [👉 using this form](https://docs.google.com/forms/d/e/1FAIpQLSe9VaONn0eglax0uTwi29rIn4tM7H2sYmmybmG5jJNlE5v0xA/viewform?usp=dialog) ## What's the syllabus? [[syllabus]] Here is the **general syllabus for the course**. A more detailed list of topics will be released with each unit. | Chapter | Topic | Description | | :---- | :---- | :---- | | 0 | Onboarding | Set you up with the tools and platforms that you will use. | | 1 | Agent Fundamentals | Explain Tools, Thoughts, Actions, Observations, and their formats. Explain LLMs, messages, special tokens and chat templates. Show a simple use case using python functions as tools. | | 2 | Frameworks | Understand how the fundamentals are implemented in popular libraries : smolagents, LangGraph, LLamaIndex | | 3 | Use Cases | Let's build some real life use cases (open to PRs 🤗 from experienced Agent builders) | | 4 | Final Assignment | Build an agent for a selected benchmark and prove your understanding of Agents on the student leaderboard 🚀 | In addition to the main syllabus, you have 3 bonus units: - *Bonus Unit 1* : Fine-tuning an LLM for Function-calling - *Bonus Unit 2* : Agent Observability and Evaluation - *Bonus Unit 3* : Agents in Games with Pokemon For instance, in the Bonus Unit 3, you learn to build your Agent to play Pokemon battles 🥊. ## What are the prerequisites? To be able to follow this course, you should have a: - Basic knowledge of Python - Basic knowledge of LLMs (we have a section in Unit 1 to recap what they are) ## What tools do I need? [[tools]] You only need 2 things: - *A computer* with an internet connection. - A *Hugging Face Account*: to push and load models, agents, and create Spaces. If you don't have an account yet, you can create one **[here](https://hf.co/join)** (it's free). <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/tools.jpg" alt="Course tools needed" width="100%"/> ## The Certification Process [[certification-process]] <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/three-paths.jpg" alt="Two paths" width="100%"/> You can choose to follow this course *in audit mode*, or do the activities and *get one of the two certificates we'll issue*. If you audit the course, you can participate in all the challenges and do assignments if you want, and **you don't need to notify us**. The certification process is **completely free**: - *To get a certification for fundamentals*: you need to complete Unit 1 of the course. This is intended for students that want to get up to date with the latest trends in Agents. - *To get a certificate of completion*: you need to complete Unit 1, one of the use case assignments we'll propose during the course, and the final challenge. There's **no deadline** for the certification process. ## What is the recommended pace? [[recommended-pace]] Each chapter in this course is designed **to be completed in 1 week, with approximately 3-4 hours of work per week**. We provide you a recommended pace: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/recommended-pace.jpg" alt="Recommended Pace" width="100%"/> ## How to get the most out of the course? [[advice]] To get the most out of the course, we have some advice: 1. <a href="https://discord.gg/UrrTSsSyjb">Join study groups in Discord</a>: studying in groups is always easier. To do that, you need to join our discord server and verify your Hugging Face account. 2. **Do the quizzes and assignments**: the best way to learn is through hands-on practice and self-assessment. 3. **Define a schedule to stay in sync**: you can use our recommended pace schedule below or create yours. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/advice.jpg" alt="Course advice" width="100%"/> ## Who are we [[who-are-we]] This course is maintained by [Ben Burtenshaw](https://huggingface.co/burtenshaw) and [Sergio Paniego](https://huggingface.co/sergiopaniego). If you have any questions, please contact us on the Hub! ## Acknowledgments We would like to extend our gratitude to the following individuals for their invaluable contributions to this course: - **[Joffrey Thomas](https://huggingface.co/Jofthomas)** – For writing and developing the course. - **[Thomas Simonini](https://huggingface.co/ThomasSimonini)** – For writing and developing the course. - **[Pedro Cuenca](https://huggingface.co/pcuenq)** – For guiding the course and providing feedback. - **[Aymeric Roucher](https://huggingface.co/m-ric)** – For his amazing demo spaces ( decoding and final agent ) as well as his help on the smolagents parts. - **[Joshua Lochner](https://huggingface.co/Xenova)** – For his amazing demo space on tokenization. - **[Quentin Gallouédec](https://huggingface.co/qgallouedec)** – For his help on the course content. - **[David Berenstein](https://huggingface.co/davidberenstein1957)** – For his help on the course content and moderation. - **[XiaXiao (ShawnSiao)](https://huggingface.co/SSSSSSSiao)** – Chinese translator for the course. - **[Jiaming Huang](https://huggingface.co/nordicsushi)** – Chinese translator for the course. - **[Kim Noel](https://github.com/knoel99)** - French translator for the course. - **[Loïck Bourdois](https://huggingface.co/lbourdois)** - French translator for the course from [CATIE](https://www.catie.fr/). ## I found a bug, or I want to improve the course [[contribute]] Contributions are **welcome** 🤗 - If you *found a bug 🐛 in a notebook*, please <a href="https://github.com/huggingface/agents-course/issues">open an issue</a> and **describe the problem**. - If you *want to improve the course*, you can <a href="https://github.com/huggingface/agents-course/pulls">open a Pull Request.</a> - If you *want to add a full section or a new unit*, the best is to <a href="https://github.com/huggingface/agents-course/issues">open an issue</a> and **describe what content you want to add before starting to write it so that we can guide you**. ## I still have questions [[questions]] Please ask your question in our <a href="https://discord.gg/UrrTSsSyjb">discord server #agents-course-questions.</a> Now that you have all the information, let's get on board ⛵ <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/time-to-onboard.jpg" alt="Time to Onboard" width="100%"/>
agents-course/units/en/unit0/introduction.mdx/0
{ "file_path": "agents-course/units/en/unit0/introduction.mdx", "repo_id": "agents-course", "token_count": 2734 }
1
# What is an Agent? <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-no-check.jpg" alt="Unit 1 planning"/> By the end of this section, you'll feel comfortable with the concept of agents and their various applications in AI. To explain what an Agent is, let's start with an analogy. ## The Big Picture: Alfred The Agent Meet Alfred. Alfred is an **Agent**. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/this-is-alfred.jpg" alt="This is Alfred"/> Imagine Alfred **receives a command**, such as: "Alfred, I would like a coffee please." <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/coffee-please.jpg" alt="I would like a coffee"/> Because Alfred **understands natural language**, he quickly grasps our request. Before fulfilling the order, Alfred engages in **reasoning and planning**, figuring out the steps and tools he needs to: 1. Go to the kitchen 2. Use the coffee machine 3. Brew the coffee 4. Bring the coffee back <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/reason-and-plan.jpg" alt="Reason and plan"/> Once he has a plan, he **must act**. To execute his plan, **he can use tools from the list of tools he knows about**. In this case, to make a coffee, he uses a coffee machine. He activates the coffee machine to brew the coffee. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/make-coffee.jpg" alt="Make coffee"/> Finally, Alfred brings the freshly brewed coffee to us. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/bring-coffee.jpg" alt="Bring coffee"/> And this is what an Agent is: an **AI model capable of reasoning, planning, and interacting with its environment**. We call it Agent because it has _agency_, aka it has the ability to interact with the environment. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/process.jpg" alt="Agent process"/> ## Let's go more formal Now that you have the big picture, here’s a more precise definition: > An Agent is a system that leverages an AI model to interact with its environment in order to achieve a user-defined objective. It combines reasoning, planning, and the execution of actions (often via external tools) to fulfill tasks. Think of the Agent as having two main parts: 1. **The Brain (AI Model)** This is where all the thinking happens. The AI model **handles reasoning and planning**. It decides **which Actions to take based on the situation**. 2. **The Body (Capabilities and Tools)** This part represents **everything the Agent is equipped to do**. The **scope of possible actions** depends on what the agent **has been equipped with**. For example, because humans lack wings, they can't perform the "fly" **Action**, but they can execute **Actions** like "walk", "run" ,"jump", "grab", and so on. ### The spectrum of "Agency" Following this definition, Agents exist on a continuous spectrum of increasing agency: | Agency Level | Description | What that's called | Example pattern | | --- | --- | --- | --- | | ☆☆☆ | Agent output has no impact on program flow | Simple processor | `process_llm_output(llm_response)` | | ★☆☆ | Agent output determines basic control flow | Router | `if llm_decision(): path_a() else: path_b()` | | ★★☆ | Agent output determines function execution | Tool caller | `run_function(llm_chosen_tool, llm_chosen_args)` | | ★★★ | Agent output controls iteration and program continuation | Multi-step Agent | `while llm_should_continue(): execute_next_step()` | | ★★★ | One agentic workflow can start another agentic workflow | Multi-Agent | `if llm_trigger(): execute_agent()` | Table from [smolagents conceptual guide](https://huggingface.co/docs/smolagents/conceptual_guides/intro_agents). ## What type of AI Models do we use for Agents? The most common AI model found in Agents is an LLM (Large Language Model), which takes **Text** as an input and outputs **Text** as well. Well known examples are **GPT4** from **OpenAI**, **LLama** from **Meta**, **Gemini** from **Google**, etc. These models have been trained on a vast amount of text and are able to generalize well. We will learn more about LLMs in the [next section](what-are-llms). <Tip> It's also possible to use models that accept other inputs as the Agent's core model. For example, a Vision Language Model (VLM), which is like an LLM but also understands images as input. We'll focus on LLMs for now and will discuss other options later. </Tip> ## How does an AI take action on its environment? LLMs are amazing models, but **they can only generate text**. However, if you ask a well-known chat application like HuggingChat or ChatGPT to generate an image, they can! How is that possible? The answer is that the developers of HuggingChat, ChatGPT and similar apps implemented additional functionality (called **Tools**), that the LLM can use to create images. <figure> <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/eiffel_brocolis.jpg" alt="Eiffel Brocolis"/> <figcaption>The model used an Image Generation Tool to generate this image. </figcaption> </figure> We will learn more about tools in the [Tools](tools) section. ## What type of tasks can an Agent do? An Agent can perform any task we implement via **Tools** to complete **Actions**. For example, if I write an Agent to act as my personal assistant (like Siri) on my computer, and I ask it to "send an email to my Manager asking to delay today's meeting", I can give it some code to send emails. This will be a new Tool the Agent can use whenever it needs to send an email. We can write it in Python: ```python def send_message_to(recipient, message): """Useful to send an e-mail message to a recipient""" ... ``` The LLM, as we'll see, will generate code to run the tool when it needs to, and thus fulfill the desired task. ```python send_message_to("Manager", "Can we postpone today's meeting?") ``` The **design of the Tools is very important and has a great impact on the quality of your Agent**. Some tasks will require very specific Tools to be crafted, while others may be solved with general purpose tools like "web_search". > Note that **Actions are not the same as Tools**. An Action, for instance, can involve the use of multiple Tools to complete. Allowing an agent to interact with its environment **allows real-life usage for companies and individuals**. ### Example 1: Personal Virtual Assistants Virtual assistants like Siri, Alexa, or Google Assistant, work as agents when they interact on behalf of users using their digital environments. They take user queries, analyze context, retrieve information from databases, and provide responses or initiate actions (like setting reminders, sending messages, or controlling smart devices). ### Example 2: Customer Service Chatbots Many companies deploy chatbots as agents that interact with customers in natural language. These agents can answer questions, guide users through troubleshooting steps, open issues in internal databases, or even complete transactions. Their predefined objectives might include improving user satisfaction, reducing wait times, or increasing sales conversion rates. By interacting directly with customers, learning from the dialogues, and adapting their responses over time, they demonstrate the core principles of an agent in action. ### Example 3: AI Non-Playable Character in a video game AI agents powered by LLMs can make Non-Playable Characters (NPCs) more dynamic and unpredictable. Instead of following rigid behavior trees, they can **respond contextually, adapt to player interactions**, and generate more nuanced dialogue. This flexibility helps create more lifelike, engaging characters that evolve alongside the player’s actions. --- To summarize, an Agent is a system that uses an AI Model (typically an LLM) as its core reasoning engine, to: - **Understand natural language:** Interpret and respond to human instructions in a meaningful way. - **Reason and plan:** Analyze information, make decisions, and devise strategies to solve problems. - **Interact with its environment:** Gather information, take actions, and observe the results of those actions. Now that you have a solid grasp of what Agents are, let’s reinforce your understanding with a short, ungraded quiz. After that, we’ll dive into the “Agent’s brain”: the [LLMs](what-are-llms).
agents-course/units/en/unit1/what-are-agents.mdx/0
{ "file_path": "agents-course/units/en/unit1/what-are-agents.mdx", "repo_id": "agents-course", "token_count": 2301 }
2
# Small Quiz (ungraded) [[quiz1]] So far we've discussed the key components and tools used in LlamaIndex. It's time to make a short quiz, since **testing yourself** is the best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf). This will help you find **where you need to reinforce your knowledge**. This is an optional quiz and it's not graded. ### Q1: What is a QueryEngine? Which of the following best describes a QueryEngine component? <Question choices={[ { text: "A system that only processes static text without any retrieval capabilities.", explain: "A QueryEngine must be able to retrieve and process relevant information.", }, { text: "A component that finds and retrieves relevant information as part of the RAG process.", explain: "This captures the core purpose of a QueryEngine component.", correct: true }, { text: "A tool that only stores vector embeddings without search functionality.", explain: "A QueryEngine does more than just store embeddings - it actively searches and retrieves information.", }, { text: "A component that only evaluates response quality.", explain: "Evaluation is separate from the QueryEngine's main retrieval purpose.", } ]} /> --- ### Q2: What is the Purpose of FunctionTools? Why are FunctionTools important for an Agent? <Question choices={[ { text: "To handle large amounts of data storage.", explain: "FunctionTools are not primarily for data storage.", }, { text: "To convert Python functions into tools that an agent can use.", explain: "FunctionTools wrap Python functions to make them accessible to agents.", correct: true }, { text: "To allow agents to create random functions definitions.", explain: "FunctionTools serve the specific purpose of making functions available to agents.", }, { text: "To only process text data.", explain: "FunctionTools can work with various types of functions, not just text processing.", } ]} /> --- ### Q3: What are Toolspecs in LlamaIndex? What is the main purpose of Toolspecs? <Question choices={[ { text: "They are redundant components that don't add functionality.", explain: "Toolspecs serve an important purpose in the LlamaIndex ecosystem.", }, { text: "They are sets of community-created tools that extend agent capabilities.", explain: "Toolspecs allow the community to share and reuse tools.", correct: true }, { text: "They are used solely for memory management.", explain: "Toolspecs are about providing tools, not managing memory.", }, { text: "They only work with text processing.", explain: "Toolspecs can include various types of tools, not just text processing.", } ]} /> --- ### Q4: What is Required to create a tool? What information must be included when creating a tool? <Question choices={[ { text: "A function, a name, and description must be defined.", explain: "While these all make up a tool, the name and description can be parsed from the function and docstring.", }, { text: "Only the name is required.", explain: "A function and description/docstring is also required for proper tool documentation.", }, { text: "Only the description is required.", explain: "A function is required so that we have code to run when an agent selects a tool", }, { text: "Only the function is required.", explain: "The name and description default to the name and docstring from the provided function", correct: true } ]} /> --- Congrats on finishing this Quiz 🥳, if you missed some elements, take time to read again the chapter to reinforce your knowledge. If you pass it, you're ready to dive deeper into building with these components!
agents-course/units/en/unit2/llama-index/quiz1.mdx/0
{ "file_path": "agents-course/units/en/unit2/llama-index/quiz1.mdx", "repo_id": "agents-course", "token_count": 947 }
3
# Introducción ![Bonus Unit 1 Thumbnail](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/bonus-unit1/thumbnail.jpg) Bienvenido a esta primera **Unidad Bonus**, donde aprenderás a **hacer fine-tuning de un Modelo de Lenguaje Grande (LLM) para llamadas a funciones**. En términos de LLMs, la llamada a funciones se está convirtiendo rápidamente en una técnica *imprescindible*. La idea es que, en lugar de depender solo de enfoques basados en prompts como hicimos en la Unidad 1, la llamada a funciones entrena a tu modelo para **realizar acciones e interpretar observaciones durante la fase de entrenamiento**, haciendo tu IA más robusta. > **¿Cuándo debería hacer esta Unidad Bonus?** > > Esta sección es **opcional** y es más avanzada que la Unidad 1, así que no dudes en hacer esta unidad ahora o revisitarla cuando tu conocimiento haya mejorado gracias a este curso. > > Pero no te preocupes, esta Unidad Bonus está diseñada para tener toda la información que necesitas, así que te guiaremos a través de cada concepto fundamental del fine-tuning de un modelo para llamadas a funciones, incluso si aún no has aprendido el funcionamiento interno del fine-tuning. La mejor manera para que puedas seguir esta Unidad Bonus es: 1. Saber cómo hacer Fine-Tuning de un LLM con Transformers, si no es el caso [revisa esto](https://huggingface.co/learn/nlp-course/chapter3/1?fw=pt). 2. Saber cómo usar `SFTTrainer` para hacer fine-tuning de nuestro modelo, para aprender más sobre esto [revisa esta documentación](https://huggingface.co/learn/nlp-course/en/chapter11/1). --- ## Lo que Aprenderás 1. **Llamadas a Funciones** Cómo los LLMs modernos estructuran sus conversaciones de manera efectiva permitiéndoles activar **Herramientas**. 2. **LoRA (Adaptación de Bajo Rango)** Un método de fine-tuning **ligero y eficiente** que reduce la sobrecarga computacional y de almacenamiento. LoRA hace que el entrenamiento de modelos grandes sea *más rápido, económico y fácil* de implementar. 3. **El Ciclo Pensamiento → Acción → Observación** en modelos de Llamadas a Funciones Un enfoque simple pero poderoso para estructurar cómo tu modelo decide cuándo (y cómo) llamar funciones, rastrear pasos intermedios e interpretar los resultados de Herramientas o APIs externas. 4. **Nuevos Tokens Especiales** Introduciremos **marcadores especiales** que ayudan al modelo a distinguir entre: - Razonamiento interno de "cadena de pensamiento" - Llamadas a funciones salientes - Respuestas que regresan de herramientas externas --- Al final de esta unidad bonus, serás capaz de: - **Entender** el funcionamiento interno de las APIs cuando se trata de Herramientas. - **Hacer fine-tuning** de un modelo usando la técnica LoRA. - **Implementar** y **modificar** el ciclo Pensamiento → Acción → Observación para crear flujos de trabajo de Llamadas a funciones robustos y mantenibles. - **Diseñar y utilizar** tokens especiales para separar sin problemas el razonamiento interno del modelo de sus acciones externas. Y **habrás hecho fine-tuning de tu propio modelo para realizar llamadas a funciones.** 🔥 ¡Sumerjámonos en las **llamadas a funciones**!
agents-course/units/es/bonus-unit1/introduction.mdx/0
{ "file_path": "agents-course/units/es/bonus-unit1/introduction.mdx", "repo_id": "agents-course", "token_count": 1189 }
4
# Tabla de Contenidos Puedes acceder a la Unidad 1 en hf.co/learn 👉 <a href="https://hf.co/learn/agents-course/unit1/introduction">aquí</a> <!-- | Título | Descripción | |-------|-------------| | [Definición de un Agente](1_definition_of_an_agent.md) | Ejemplo general de lo que pueden hacer los agentes sin jerga técnica. | | [Explicación de LLMs](2_explain_llms.md) | Explicación de los Modelos de Lenguaje Grandes, incluyendo el árbol genealógico de modelos y modelos adecuados para agentes. | | [Mensajes y Tokens Especiales](3_messages_and_special_tokens.md) | Explicación de mensajes, tokens especiales y uso de plantillas de chat. | | [Biblioteca de Agente de Prueba](4_dummy_agent_library.md) | Introducción al uso de una biblioteca de agente de prueba y API serverless. | | [Herramientas](5_tools.md) | Descripción general de Pydantic para herramientas de agentes y otros formatos comunes de herramientas. | | [Pasos y Estructura del Agente](6_agent_steps_and_structure.md) | Pasos involucrados en un agente, incluyendo pensamientos, acciones, observaciones y una comparación entre agentes de código y agentes JSON. | | [Pensamientos](7_thoughts.md) | Explicación de pensamientos y el enfoque ReAct. | | [Acciones](8_actions.md) | Descripción general de acciones y enfoque de detener y analizar. | | [Observaciones](9_observations.md) | Explicación de observaciones y añadir resultado para reflexionar. | | [Quiz](10_quizz.md) | Contiene cuestionarios para evaluar la comprensión de los conceptos. | | [Caso de Uso Simple](11_simple_use_case.md) | Proporciona un ejercicio de caso de uso simple utilizando datetime y una función de Python como herramienta. | -->
agents-course/units/es/unit1/README.md/0
{ "file_path": "agents-course/units/es/unit1/README.md", "repo_id": "agents-course", "token_count": 629 }
5
# Introducción a los Frameworks de Agentes <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/thumbnail.jpg" alt="Thumbnail"/> Bienvenido/a a esta segunda unidad, donde **exploraremos diferentes frameworks de agentes** que pueden ser utilizados para construir poderosas aplicaciones basadas en agentes. Estudiaremos: - En la Unidad 2.1: [smolagents](https://huggingface.co/docs/smolagents/es/index) - En la Unidad 2.2: [LlamaIndex](https://www.llamaindex.ai/) - En la Unidad 2.3: [LangGraph](https://www.langchain.com/langgraph) ¡Vamos a sumergirnos! 🕵 ## Cuándo Usar un Framework de Agentes Un framework de agentes **no siempre es necesario cuando se construye una aplicación basada en LLMs**. Proporcionan flexibilidad en el flujo de trabajo para resolver eficientemente una tarea específica, pero no siempre son necesarios. A veces, **los flujos de trabajo predefinidos son suficientes** para satisfacer las solicitudes de los usuarios, y no hay una necesidad real de un framework de agentes. Si el enfoque para construir un agente es simple, como una cadena de prompts, usar código plano puede ser suficiente. La ventaja es que el desarrollador/a tendrá **control total y comprensión de su sistema sin abstracciones**. Sin embargo, cuando el flujo de trabajo se vuelve más complejo, como permitir que un LLM llame a funciones o usar múltiples agentes, estas abstracciones comienzan a ser útiles. Considerando estas ideas, ya podemos identificar la necesidad de algunas características: * Un *motor LLM* que impulse el sistema. * Una *lista de herramientas* a las que el agente puede acceder. * Un *analizador* para extraer llamadas a herramientas de la salida del LLM. * Un *prompt de sistema* sincronizado con el analizador. * Un *sistema de memoria*. * *Registro de errores y mecanismos de reintento* para controlar los errores del LLM. Exploraremos cómo se resuelven estos temas en varios frameworks, incluyendo `smolagents`, `LlamaIndex` y `LangGraph`. ## Unidades de Frameworks de Agentes | Framework | Descripción | Autor de la Unidad | |------------|----------------|----------------| | [smolagents](./smolagents/introducción) | Framework de agentes desarrollado por Hugging Face. | Sergio Paniego - [HF](https://huggingface.co/sergiopaniego) - [X](https://x.com/sergiopaniego) - [Linkedin](https://www.linkedin.com/in/sergio-paniego-blanco) |
agents-course/units/es/unit2/introduction.mdx/0
{ "file_path": "agents-course/units/es/unit2/introduction.mdx", "repo_id": "agents-course", "token_count": 867 }
6
# Uso de Herramientas en LlamaIndex **Definir un conjunto claro de herramientas es crucial para el rendimiento.** Como discutimos en [unidad 1](../../unit1/tools), las interfaces de herramientas claras son más fáciles de usar para los LLM. Al igual que una interfaz de API para ingenieros humanos, pueden obtener m s de la herramienta si es f cil de entender c mo funciona. Hay **cuatro tipos principales de herramientas en LlamaIndex**: ![Herramientas](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/tools.png) 1. `FunctionTool`: Convierte cualquier función de Python en una herramienta que un agente puede utilizar. Averigua autom ticamente cómo funciona. 2. `QueryEngineTool`: Una herramienta que permite a los agentes utilizar motores de consulta. Dado que los agentes est n construidos sobre motores de consulta, también pueden utilizar a otros agentes como herramientas. 3. `Toolspecs`: Conjuntos de herramientas creados por la comunidad, que a menudo incluyen herramientas para servicios específicos como Gmail. 4. `Utility Tools`: Herramientas especiales que ayudan a manejar grandes cantidades de datos de otras herramientas. Vamos a ver cada una de ellas en más detalle a continuación. ## Crear una FunctionTool <Tip> Puedes seguir el código en <a href="https://huggingface.co/agents-course/notebooks/blob/main/unit2/llama-index/tools.ipynb" target="_blank">este cuaderno</a> que puedes ejecutar utilizando Google Colab. </Tip> Una FunctionTool proporciona una forma sencilla de envolver cualquier función de Python y hacerla disponible para un agente. Puedes pasar tanto una función sincrónica como asincrona a la herramienta, junto con par metros opcionales `name` y `description`. El nombre y la descripción son particularmente importantes, ya que ayudan al agente a entender cuando y cómo utilizar la herramienta de manera efectiva. Vamos a ver cómo crear una FunctionTool a continuación y luego llamarla. ```python from llama_index.core.tools import FunctionTool def get_weather(location: str) -> str: """Útil para obtener el clima para una ubicación determinada.""" print(f"Obtener clima de {location}") return f"El clima en {location} es soleado" tool = FunctionTool.from_defaults( get_weather, name="my_weather_tool", description="Útil para obtener el clima para una ubicación determinada.", ) tool.call("Nuevo York") ``` <Tip>Cuando se utiliza un agente o LLM con llamadas a funciones, la herramienta seleccionada (y los argumentos escritos para esa herramienta) dependen en gran medida del nombre de la herramienta y la descripción del proposito y argumentos de la herramienta. Aprende más sobre la llamada a funciones en la <a href="https://docs.llamaindex.ai/en/stable/examples/workflow/function_calling_agent/">Guía de llamada a funciones</a>.</Tip> ## Creando un QueryEngineTool The `QueryEngine` que definimos en la unidad anterior puede ser facilmente transformado en una herramienta usando la clase `QueryEngineTool`. Vamos a ver como crear un `QueryEngineTool` desde un `QueryEngine` en el ejemplo siguiente. ```python from llama_index.core import VectorStoreIndex from llama_index.core.tools import QueryEngineTool from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI from llama_index.embeddings.huggingface_api import HuggingFaceInferenceAPIEmbedding from llama_index.vector_stores.chroma import ChromaVectorStore embed_model = HuggingFaceInferenceAPIEmbedding("BAAI/bge-small-en-v1.5") db = chromadb.PersistentClient(path="./alfred_chroma_db") chroma_collection = db.get_or_create_collection("alfred") vector_store = ChromaVectorStore(chroma_collection=chroma_collection) index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model) llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct") query_engine = index.as_query_engine(llm=llm) tool = QueryEngineTool.from_defaults(query_engine, name="some useful name", description="some useful description") ``` ## Crear Herramientas Especificas Piensa en `ToolSpecs` como colecciones de herramientas que trabajan juntas armoniosamente - como una caja de herramientas bien organizada de un profesional. Al igual que una caja de herramientas de un mecanico contiene herramientas complementarias que trabajan juntas para reparaciones de vehiculos, un `ToolSpec` combina herramientas relacionadas para un proposito especifico. Por ejemplo, la herramienta especifica de un agente contable podria combinar elegante mente capacidades de hoja de calculo, funcionalidad de correo electronico y herramientas de calculo para manejar tareas financieras con precision y eficiencia. <details> <summary>Instalar la herramienta especifica de Google</summary> Como se introdujo en la [seccion sobre LlamaHub](llama-hub), podemos instalar la herramienta especifica de Google con el siguiente comando: ```python pip install llama-index-tools-google ``` </details> Y ahora podemos cargar la herramienta especifica y convertirla en una lista de herramientas. ```python from llama_index.tools.google import GmailToolSpec tool_spec = GmailToolSpec() tool_spec_list = tool_spec.to_tool_list() Para obtener una visión más detallada de las herramientas, podemos examinar los `metadata` de cada una de ellas. ```python [(tool.metadata.name, tool.metadata.description) for tool in tool_spec_list] ``` ## Herramientas de utilidad A menudo, realizar consultas a una API **puede devolver una cantidad excesiva de datos**, algunos de los cuales pueden ser irrelevantes, desbordar la ventana de contexto del LLM o aumentar innecesariamente el número de tokens que se están utilizando. Vamos a revisar nuestras dos principales herramientas de utilidad a continuaci n. 1. `OnDemandToolLoader`: Esta herramienta convierte cualquier cargador de datos existente de LlamaIndex (clase BaseReader) en una herramienta que un agente puede utilizar. La herramienta se puede llamar con todos los parámetros necesarios para desencadenar `load_data` del cargador de datos, junto con una cadena de consulta de lenguaje natural. Durante la ejecución, primero cargamos datos del cargador de datos, los indexamos (por ejemplo, con un vector store) y luego los consultamos. Todos estos tres pasos se realizan en una sola llamada a la herramienta. 2. `LoadAndSearchToolSpec`: El LoadAndSearchToolSpec toma cualquier herramienta existente como entrada. Como herramienta específicas, implementa `to_tool_list`, y cuando se llama a esa función, se devuelven dos herramientas: una herramienta de carga y una herramienta de búsqueda. La ejecución de la herramienta de carga llamar a la herramienta subyacente, y luego indexar la salida (por defecto con un vector index). La ejecución de la herramienta de búsqueda tomar una cadena de consulta como entrada y llamar al vector index subyacente. <Tip>Puedes encontrar herramientas y herramientas de utilidad en <a href="https://llamahub.ai/">LlamaHub</a></Tip> Ahora que entendemos los conceptos básicos de agentes y herramientas en LlamaIndex, veamos cómo podemos **usar LlamaIndex para crear flujos de trabajo configurables y manejables!**
agents-course/units/es/unit2/llama-index/tools.mdx/0
{ "file_path": "agents-course/units/es/unit2/llama-index/tools.mdx", "repo_id": "agents-course", "token_count": 2510 }
7
# Generación Aumentada por Recuperación con Agentes (RAG Agéntico) En esta unidad, veremos cómo podemos usar RAG Agéntico para ayudar a Alfred a prepararse para la increíble gala. <Tip>Sabemos que ya hemos discutido la Generación Aumentada por Recuperación (RAG) y RAG agéntico en la unidad anterior, así que siéntete libre de avanzar si ya estás familiarizado con los conceptos.</Tip> Los LLMs están entrenados en enormes volúmenes de datos para aprender conocimiento general. Sin embargo, el modelo de conocimiento del mundo de los LLMs no siempre puede contener información relevante y actualizada. **RAG resuelve este problema encontrando y recuperando información relevante de tus datos y enviándola al LLM.** ![RAG](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/rag.png) Ahora, piensa en cómo funciona Alfred: 1. Le hemos pedido a Alfred que ayude a planificar una gala 2. Alfred necesita encontrar las últimas noticias e información meteorológica 3. Alfred necesita estructurar y buscar la información de los invitados Así como Alfred necesita buscar en la información de tu hogar para ser útil, cualquier agente necesita una manera de encontrar y comprender datos relevantes. **RAG Agéntico es una forma poderosa de usar agentes para responder preguntas sobre tus datos.** Podemos proporcionar varias herramientas a Alfred para ayudarlo a responder preguntas. Sin embargo, en lugar de responder automáticamente a la pregunta basada en documentos, Alfred puede decidir usar cualquier otra herramienta o flujo para responder la pregunta. ![RAG Agéntico](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/agentic-rag.png) ¡Comencemos a **construir nuestro flujo de trabajo de RAG agéntico!** Primero, crearemos una herramienta RAG para recuperar detalles actualizados sobre los invitados. Luego, desarrollaremos herramientas para búsqueda web, actualizaciones meteorológicas y estadísticas de descargas de modelos de Hugging Face Hub. Finalmente, integraremos todo para dar vida a nuestro agente RAG agéntico!
agents-course/units/es/unit3/agentic-rag/agentic-rag.mdx/0
{ "file_path": "agents-course/units/es/unit3/agentic-rag/agentic-rag.mdx", "repo_id": "agents-course", "token_count": 743 }
8
# Qu'est-ce que l'appel de fonctions ? Tout comme les outils, l'appel de fonctions (*function-calling*) est une **façon pour un LLM de prendre des actions basé sur son environnement**. Cependant, la capacité d'appel de fonctions **est apprise par le modèle**, et repose **moins sur le *prompting* que d'autres techniques d'agents**. Cette approche a d'abord été [introduite dans GPT-4](https://openai.com/index/function-calling-and-other-api-updates/), et a ensuite été reproduite dans d'autres modèles. Durant l'Unité 1, l'agent **n'a pas appris à utiliser les outils**. Nous avons juste fourni une liste, et nous nous sommes appuyés sur le fait que **le modèle était capable de généraliser la définition d'un plan à l'aide de ces outils**. **Aalors qu'avec l'appel de fonctions, l'agent est finetunét (entraîné) pour utiliser les outils**. ## Comment le modèle apprend-il à prendre une action ? Dans l'Unité 1, nous avons exploré le *workflow* général d'un agent. Une fois que l'utilisateur a donné quelques outils à l'agent et l'a sollicité avec une requête, le modèle va effectuer un cycle à travers : 1. *Réflexion* : Quelle(s) action(s) dois-je prendre pour atteindre l'objectif. 2. *Action* : Formater l'action avec le bon paramètre et arrêter la génération. 3. *Observation* : Récupérer le résultat de l'exécution. Dans une conversation « typique » avec un modèle via une API, la conversation alternera entre les messages utilisateur et assistant comme ceci : ```python conversation = [ {"role": "user", "content": "J'ai besoin d'aide avec ma commande"}, {"role": "assistant", "content": "Je serais ravi de vous aider. Pourriez-vous fournir votre numéro de commande ?"}, {"role": "user", "content": "C'est ORDER-123"}, ] ``` L'appel de fonctions apporte **de nouveaux rôles à la conversation** ! 1. Un nouveau rôle pour une **action** 2. Un nouveau rôle pour une **observation** Si nous prenons l'[API Mistral](https://docs.mistral.ai/capabilities/function_calling/) comme exemple, cela ressemble à ceci : ```python conversation = [ { "role": "user", "content": "Quel est le statut de ma transaction T1001 ?" }, { "role": "assistant", "content": "", "function_call": { "name": "retrieve_payment_status", "arguments": "{\"transaction_id\": \"T1001\"}" } }, { "role": "tool", "name": "retrieve_payment_status", "content": "{\"status\": \"Paid\"}" }, { "role": "assistant", "content": "Votre transaction T1001 a été payée avec succès." } ] ``` > ... Mais vous avez dit qu'il y a un nouveau rôle pour les appels de fonctions ? **Oui et non**, dans ce cas et dans beaucoup d'autres API, le modèle formate l'action à prendre "assistant" comme message. Le gabarit de chat représentera ensuite cela comme des ***tokens* spéciaux** pour l'appel de fonctions. - `[AVAILABLE_TOOLS]` – Démarre la liste des outils disponibles - `[/AVAILABLE_TOOLS]` – Termine la liste des outils disponibles - `[TOOL_CALLS]` – Fait un appel à un outil (c'est-à-dire, prend une action) - `[TOOL_RESULTS]` – Observe le résultat de l'action - `[/TOOL_RESULTS]` – Fin de l'observation (c'est-à-dire, le modèle peut décoder à nouveau) Nous reparlerons de l'appel de fonctions dans ce cours, mais si vous voulez approfondir, vous pouvez consulter [cette excellente section de la documentation de Mistral](https://docs.mistral.ai/capabilities/function_calling/). --- Maintenant que nous avons vu ce qu'est l'appel de fonctions et comment cela fonctionne, **ajoutons cette capacité à un modèle qui n'en dispose pas nativement** : le [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it). Pour cela nous allons ajouter de nouveaux *tokens* spéciaux au modèle. Pour être capable de faire ceci, **nous devons d'abord comprendre comment marche le finetuning et la méthode LoRA**.
agents-course/units/fr/bonus-unit1/what-is-function-calling.mdx/0
{ "file_path": "agents-course/units/fr/bonus-unit1/what-is-function-calling.mdx", "repo_id": "agents-course", "token_count": 1580 }
9
# Actions : permettre à l'agent d'interagir avec son environnement <Tip> Dans cette section, nous explorons les étapes concrètes qu'un agent entreprend pour interagir avec son environnement. Nous aborderons la manière dont les actions sont représentées (en utilisant du JSON ou du code), l'importance de l'approche <i>stop</i> and <i>parse</i>, et nous présenterons différents types d'agents. </Tip> Les actions sont les étapes concrètes qu'un **agent entreprend pour interagir avec son environnement**. Que ce soit pour naviguer sur le web à la recherche d'informations ou pour contrôler un dispositif physique, chaque action est une opération délibérée exécutée par l'agent. Par exemple, un agent assistant au service client pourrait récupérer des données client, proposer des articles de support ou transférer des problèmes à un représentant humain. ## Types d'actions Il existe plusieurs types d'agents qui réalisent des actions de manières différentes : | Type d'Agent | Description | |---------------------------|-------------------------------------------------------------------------------------------------------| | Agent à JSON | L'action à entreprendre est spécifiée au format JSON. | | Agent à code | L'agent génère un bloc de code qui est interprété de manière externe. | | Agent à appel de fonction | Il s'agit d'une sous-catégorie de l'agent JSON qui a été affiné pour générer un nouveau message pour chaque action. | Les actions elles-mêmes peuvent remplir de nombreux objectifs : | Type d'Action | Description | |-----------------------------|----------------------------------------------------------------------------------------------------------| | Collecte d'informations | Effectuer des recherches sur le web, interroger des bases de données ou récupérer des documents. | | Utilisation d'outils | Effectuer des appels API, réaliser des calculs et exécuter du code. | | Interaction avec l'environnement | Manipuler des interfaces numériques ou contrôler des dispositifs physiques. | | Communication | Interagir avec les utilisateurs via le chat ou collaborer avec d'autres agents. | Le LLM ne gère que du texte et l'utilise pour décrire l'action qu'il souhaite entreprendre ainsi que les paramètres à fournir à l'outil. Pour qu'un agent fonctionne correctement, le LLM doit savoir **ARRÊTER de générer de nouveaux *tokens* lorsque l'action est terminée**. Cela permet de transférer le contrôle du LLM à l'agent et de s'assurer que le résultat est analysable, que le format prévu soit JSON, du code ou des appels de fonctions. ## L'approche *Stop and Parse* Une méthode clé pour implémenter des actions est l'**approche *stop* and *parse***. Cette méthode garantit que la sortie de l'agent est structurée et prévisible : 1. **Génération dans un format structuré** : L'agent produit l'action envisagée dans un format clair et prédéfini (JSON ou code). 2. **Arrêt de la génération** : Une fois que le texte définissant l'action a été émis, le **LLM cesse de générer des *tokens* supplémentaires**. Cela permet d'éviter les sorties supplémentaires ou erronées. 3. **Analyse de la sortie** : Un parseur externe lit l'action formatée, détermine quel outil appeler, et extrait les paramètres requis. Par exemple, un agent ayant besoin de vérifier la météo pourrait produire la sortie suivante : ```json Thought: Je dois vérifier le temps qu'il fait à New York. Action : { "action": "get_weather", "action_input": {"location": "New York"} } ``` Le *framework* peut ensuite analyser facilement le nom de la fonction à appeler et les arguments à fournir. Ce format clair et lisible par une machine minimise les erreurs et permet aux outils externes de traiter avec précision la commande de l'agent. > Note : Les agents à appel de fonction fonctionnent de manière similaire en structurant chaque action de manière à ce qu'une fonction désignée soit invoquée avec les arguments corrects. Nous approfondirons ces types d'agents dans une prochaine unité. ## Agents à code Une approche alternative consiste à utiliser des *agents [générateur de] code*. L'idée est : **au lieu de produire un simple objet JSON**, un agent code génère un **bloc de code exécutable — typiquement dans un langage de haut niveau comme Python**. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/code-vs-json-actions.png" alt="Agents Code" /> Cette approche offre plusieurs avantages : - **Expressivité :** Le code peut naturellement représenter une logique complexe, incluant des boucles, des conditionnels et des fonctions imbriquées, offrant ainsi une flexibilité supérieure au JSON. - **Modularité et réutilisabilité :** Le code généré peut inclure des fonctions et des modules réutilisables pour différentes actions ou tâches. - **Débogage amélioré :** Grâce à une syntaxe de programmation bien définie, les erreurs de code sont souvent plus faciles à détecter et corriger. - **Intégration directe :** Les agents à code peuvent s'intégrer directement avec des bibliothèques et des API externes, permettant ainsi des opérations plus complexes comme le traitement de données ou la prise de décision en temps réel. Par exemple, un agent à code chargé de récupérer la météo pourrait générer l'extrait Python suivant : ```python # Exemple d'Agent Code : Récupérer des informations météorologiques def get_weather(city): import requests api_url = f"https://api.weather.com/v1/location/{city}?apiKey=YOUR_API_KEY" response = requests.get(api_url) if response.status_code == 200: data = response.json() return data.get("weather", "Aucune information météo disponible") else: return "Erreur : Impossible de récupérer les données météo." # Exécuter la fonction et préparer la réponse finale result = get_weather("New York") final_answer = f"La météo actuelle à New York est : {result}" print(final_answer) ``` Dans cet exemple, l'agent à code : - Récupère des données météo **via un appel API**, - Traite la réponse, - Et utilise la fonction `print()` pour afficher la réponse finale. Cette méthode **suit également l'approche *stop and parse*** en délimitant clairement le bloc de code et en signalant quand l'exécution est terminée (ici, par l'affichage de `final_answer`). --- Nous avons vu que les actions font le lien entre le raisonnement interne de l'agent et ses interactions réelles en exécutant des tâches claires et structurées — que ce soit via JSON, du code ou des appels de fonctions. Cette exécution délibérée garantit que chaque action est précise et prête pour un traitement externe via l'approche *stop and parse*. Dans la section suivante, nous explorerons les Observations pour voir comment les agents capturent et intègrent les retours de leur environnement. Après cela, nous serons **finalement prêts à construire notre premier agent !**
agents-course/units/fr/unit1/actions.mdx/0
{ "file_path": "agents-course/units/fr/unit1/actions.mdx", "repo_id": "agents-course", "token_count": 2746 }
10
# Les composants de base de LangGraph Pour créer des applications avec LangGraph, vous devez comprendre ses composants principaux. Explorons les blocs fondamentaux qui constituent une application LangGraph. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/Building_blocks.png" alt="Building Blocks" width="70%"/> Une application dans LangGraph commence à partir d'un **point d'entrée**, et selon l'exécution, le flux peut aller vers une fonction ou une autre jusqu'à ce qu'il atteigne la FIN. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/application.png" alt="Application"/> ## 1. État L'**état** est le concept central dans LangGraph. Il représente toutes les informations qui circulent à travers votre application. ```python from typing_extensions import TypedDict class State(TypedDict): graph_state: str ``` L'état est **défini par l'utilisateur**, donc les champs doivent être soigneusement conçus pour contenir toutes les données nécessaires au processus de prise de décision ! > 💡 **Astuce :** Réfléchissez soigneusement aux informations que votre application doit suivre entre les étapes. ## 2. Nœuds Les **nœuds** sont des fonctions Python. Chaque nœud : - Prend l'état en entrée - Effectue une opération - Retourne des mises à jour de l'état ```python def node_1(state): print("---Node 1---") return {"graph_state": state['graph_state'] +" I am"} def node_2(state): print("---Node 2---") return {"graph_state": state['graph_state'] +" happy!"} def node_3(state): print("---Node 3---") return {"graph_state": state['graph_state'] +" sad!"} ``` Par exemple, les nœuds peuvent contenir : - **Appels de LLM** : Générer du texte ou prendre des décisions - **Appels d'outils** : Interagir avec des systèmes externes - **Logique conditionnelle** : Déterminer les prochaines étapes - **Intervention humaine** : Obtenir des contributions des utilisateurs > 💡 **Info :** Certains nœuds nécessaires pour l'ensemble du *workflow* comme *START* et *END* existent directement dans *LangGraph*. ## 3. Arêtes Les **arêtes** connectent les nœuds et définissent les chemins possibles à travers votre graphe : ```python import random from typing import Literal def decide_mood(state) -> Literal["node_2", "node_3"]: # Souvent, nous utiliserons l'état pour décider du prochain nœud à visiter user_input = state['graph_state'] # Ici, faisons juste une répartition 50/50 entre les nœuds 2, 3 if random.random() < 0.5: # 50% du temps, nous retournons Node 2 return "node_2" # 50% du temps, nous retournons Node 3 return "node_3" ``` Les arêtes peuvent être : - **Directes** : Toujours aller du nœud A au nœud B - **Conditionnelles** : Choisir le prochain nœud basé sur l'état actuel ## 4. StateGraph Le **StateGraph** est le conteneur qui détient l'ensemble du *workflow* de votre agent : ```python from IPython.display import Image, display from langgraph.graph import StateGraph, START, END # Construire le graphe builder = StateGraph(State) builder.add_node("node_1", node_1) builder.add_node("node_2", node_2) builder.add_node("node_3", node_3) # Logique builder.add_edge(START, "node_1") builder.add_conditional_edges("node_1", decide_mood) builder.add_edge("node_2", END) builder.add_edge("node_3", END) # Ajouter graph = builder.compile() ``` Qui peut ensuite être visualisé ! ```python # Visualiser display(Image(graph.get_graph().draw_mermaid_png())) ``` <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/LangGraph/basic_graph.jpeg" alt="Graph Visualization"/> Mais plus important encore, l'invocation : ```python graph.invoke({"graph_state" : "Hi, this is Lance."}) ``` ressort : ``` ---Node 1--- ---Node 3--- {'graph_state': 'Hi, this is Lance. I am sad!'} ``` ## Et maintenant ? Dans la prochaine section, nous mettrons ces concepts en pratique en construisant notre premier graphe. Ce graphe permet à Alfred de prendre vos emails, les classifier, et rédiger une réponse préliminaire s'ils sont authentiques.
agents-course/units/fr/unit2/langgraph/building_blocks.mdx/0
{ "file_path": "agents-course/units/fr/unit2/langgraph/building_blocks.mdx", "repo_id": "agents-course", "token_count": 1552 }
11
# Créer des *workflows* agentiques dans LlamaIndex Un *workflow* dans LlamaIndex fournit un moyen structuré d'organiser votre code en étapes séquentielles et gérables. Un tel *workflow* est créé en définissant des `Steps` qui sont déclenchés par des `Events`, et qui émettent eux-mêmes des `Events` pour déclencher d'autres étapes. Jetons un coup d'œil à Alfred montrant un *workflow* LlamaIndex pour une tâche de RAG. ![Workflow Schematic](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/workflows.png) **Les *workflows* offrent plusieurs avantages clés :** - Organisation claire du code en étapes discrètes - Architecture événementielle pour un flux de contrôle flexible - Communication *type-safe* entre les étapes - Gestion d'état intégrée - Support pour des interactions d'agents simples et complexes Comme vous l'avez peut-être deviné, **les *workflows* trouvent un excellent équilibre entre l'autonomie des agents tout en maintenant le contrôle sur le *workflow* global.** Alors, apprenons à créer un *workflow* nous-mêmes ! ## Créer des *Workflows* <Tip> Vous pouvez suivre le code dans <a href="https://huggingface.co/agents-course/notebooks/blob/main/fr/unit2/llama-index/workflows.ipynb" target="_blank">ce <i>notebook</i></a> que vous pouvez exécuter avec Google Colab. </Tip> ### Création de *Workflow* de base <details> <summary>Installer le package <i>Workflow</i></summary> Comme introduit dans la <a href="./llama-hub">section sur le LlamaHub</a>, nous pouvons installer le package <code>Workflow</code> avec la commande suivante : ```python pip install llama-index-utils-workflow ``` </details> Nous pouvons créer un *workflow* en une seule étape en définissant une classe qui hérite de `Workflow` et en décorant vos fonctions avec `@step`. Nous devrons également ajouter `StartEvent` et `StopEvent`, qui sont des événements spéciaux utilisés pour indiquer le début et la fin du *workflow*. ```python from llama_index.core.workflow import StartEvent, StopEvent, Workflow, step class MyWorkflow(Workflow): @step async def my_step(self, ev: StartEvent) -> StopEvent: # do something here return StopEvent(result="Hello, world!") w = MyWorkflow(timeout=10, verbose=False) result = await w.run() ``` Comme vous pouvez le voir, nous pouvons maintenant exécuter le *workflow* en appelant `w.run()`. ### Connecter plusieurs étapes Pour connecter plusieurs étapes, nous **créons des événements personnalisés qui transportent des données entre les étapes.** Pour ce faire, nous devons ajouter un `Event` qui est passé entre les étapes et transfère la sortie de la première étape vers la deuxième étape. ```python from llama_index.core.workflow import Event class ProcessingEvent(Event): intermediate_result: str class MultiStepWorkflow(Workflow): @step async def step_one(self, ev: StartEvent) -> ProcessingEvent: # Traitement des données initiales return ProcessingEvent(intermediate_result="Step 1 complete") @step async def step_two(self, ev: ProcessingEvent) -> StopEvent: # Utiliser le résultat intermédiaire final_result = f"Finished processing: {ev.intermediate_result}" return StopEvent(result=final_result) w = MultiStepWorkflow(timeout=10, verbose=False) result = await w.run() result ``` L'indication de type est importante ici, car elle garantit que le *workflow* est exécuté correctement. Compliquons un peu les choses ! ### Boucles et branches L'indication de type est la partie la plus puissante des *workflows* car elle nous permet de créer des branches, des boucles et des jointures pour faciliter des *workflows* plus complexes. Montrons un exemple de **création d'une boucle** en utilisant l'opérateur union `|`. Dans l'exemple ci-dessous, nous voyons que le `LoopEvent` est pris en entrée pour l'étape et peut également être retourné en sortie. ```python from llama_index.core.workflow import Event import random class ProcessingEvent(Event): intermediate_result: str class LoopEvent(Event): loop_output: str class MultiStepWorkflow(Workflow): @step async def step_one(self, ev: StartEvent | LoopEvent) -> ProcessingEvent | LoopEvent: if random.randint(0, 1) == 0: print("Bad thing happened") return LoopEvent(loop_output="Back to step one.") else: print("Good thing happened") return ProcessingEvent(intermediate_result="First step complete.") @step async def step_two(self, ev: ProcessingEvent) -> StopEvent: # Utiliser le résultat intermédiaire final_result = f"Finished processing: {ev.intermediate_result}" return StopEvent(result=final_result) w = MultiStepWorkflow(verbose=False) result = await w.run() result ``` ### Dessiner des *Workflows* Nous pouvons également dessiner des *workflows*. Utilisons la fonction `draw_all_possible_flows` pour dessiner le *workflow*. Cela stocke le *workflow* dans un fichier HTML. ```python from llama_index.utils.workflow import draw_all_possible_flows w = ... # tel que défini dans la section précédentetel que défini dans la section précédente draw_all_possible_flows(w, "flow.html") ``` ![workflow drawing](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/workflow-draw.png) Il y a une dernière astuce cool que nous couvrirons dans le cours, qui est la capacité d'ajouter de l'état au *workflow*. ### Gestion d'état La gestion d'état est utile quand vous voulez garder une trace de l'état du *workflow*, pour que chaque étape ait accès au même état. Nous pouvons faire cela en utilisant l'indication de type `Context` au-dessus d'un paramètre dans la fonction d'étape. ```python from llama_index.core.workflow import Context, StartEvent, StopEvent @step async def query(self, ctx: Context, ev: StartEvent) -> StopEvent: # stocker la requête dans le contexte await ctx.store.set("query", "What is the capital of France?") # faire quelque chose avec le contexte et l'event val = ... # récupérer la requête dans le contexte query = await ctx.store.get("query") return StopEvent(result=val) ``` Parfait ! Maintenant vous savez comment créer des *workflows* de base dans LlamaIndex ! <Tip>Il y a quelques nuances plus complexes aux <i>workflows</i>, que vous pouvez apprendre dans <a href="https://docs.llamaindex.ai/en/stable/understanding/workflows/">la documentation LlamaIndex</a>.</Tip> Cependant, il y a une autre façon de créer des *workflows*, qui repose sur la classe `AgentWorkflow`. Jetons un coup d'œil à comment nous pouvons utiliser cela pour créer un *workflow* multi-agents. ## Automatiser les *workflows* avec des *Multi-Agent Workflows* Au lieu de la création manuelle de *workflows*, nous pouvons utiliser la **classe `AgentWorkflow` pour créer un *workflow* multi-agents**. L'`AgentWorkflow` utilise des *Workflow Agents* pour vous permettre de créer un système d'un ou plusieurs agents qui peuvent collaborer et se passer des tâches entre eux basées sur leurs capacités spécialisées. Cela permet de construire des systèmes d'agents complexes où différents agents gèrent différents aspects d'une tâche. Au lieu d'importer des classes de `llama_index.core.agent`, nous importerons les classes d'agents de `llama_index.core.agent.workflow`. Un agent doit être désigné comme l'agent racine dans le constructeur `AgentWorkflow`. Quand un message utilisateur arrive, il est d'abord routé vers l'agent racine. Chaque agent peut ensuite : - Gérer la demande directement en utilisant leurs outils - Passer le relais à un autre agent mieux adapté à la tâche - Retourner une réponse à l'utilisateur Voyons comment créer un *workflow* multi-agents. ```python from llama_index.core.agent.workflow import AgentWorkflow, ReActAgent from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI # Définir quelques outils def add(a: int, b: int) -> int: """Additionner deux nombres.""" return a + b def multiply(a: int, b: int) -> int: """Multiplier deux nombres.""" return a * b llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct") # nous pouvons passer des fonctions directement sans FunctionTool -- les fn/docstring sont analysés pour le nom/description multiply_agent = ReActAgent( name="multiply_agent", description="Is able to multiply two integers", system_prompt="A helpful assistant that can use a tool to multiply numbers.", tools=[multiply], llm=llm, ) addition_agent = ReActAgent( name="add_agent", description="Is able to add two integers", system_prompt="A helpful assistant that can use a tool to add numbers.", tools=[add], llm=llm, ) # Créer le workflow workflow = AgentWorkflow( agents=[multiply_agent, addition_agent], root_agent="multiply_agent", ) # Exécuter le système response = await workflow.run(user_msg="Can you add 5 and 3?") ``` Les outils d'agents peuvent également modifier l'état du *workflow* que nous avons mentionné plus tôt. Avant de commencer le *workflow*, nous pouvons fournir un dictionnaire d'état initial qui sera disponible pour tous les agents. L'état est stocké dans la clé d'état du contexte du *workflow*. Il sera injecté dans le *state_prompt* qui augmente chaque nouveau message utilisateur. Injectons un compteur pour compter les appels de fonctions en modifiant l'exemple précédent : ```python from llama_index.core.workflow import Context # Définir quelques outils async def add(ctx: Context, a: int, b: int) -> int: """Additionner deux nombres.""" # mettre à jour notre comptage cur_state = await ctx.store.get("state") cur_state["num_fn_calls"] += 1 await ctx.store.set("state", cur_state) return a + b async def multiply(ctx: Context, a: int, b: int) -> int: """Multiplier deux nombres.""" # mettre à jour notre comptage cur_state = await ctx.store.get("state") cur_state["num_fn_calls"] += 1 await ctx.store.set("state", cur_state) return a * b ... workflow = AgentWorkflow( agents=[multiply_agent, addition_agent], root_agent="multiply_agent", initial_state={"num_fn_calls": 0}, state_prompt="Current state: {state}. User message: {msg}", ) # exécuter le workflow avec le contexte ctx = Context(workflow) response = await workflow.run(user_msg="Can you add 5 and 3?", ctx=ctx) # sortir et inspecter l'état state = await ctx.store.get("state") print(state["num_fn_calls"]) ``` Félicitations ! Vous avez maintenant passer en revue les bases des agents dans LlamaIndex ! 🎉 Continuons avec un dernier quiz pour solidifier vos connaissances ! 🚀
agents-course/units/fr/unit2/llama-index/workflows.mdx/0
{ "file_path": "agents-course/units/fr/unit2/llama-index/workflows.mdx", "repo_id": "agents-course", "token_count": 3892 }
12
# Conclusion Dans cette unité, nous avons appris comment créer un système de RAG agentique pour aider Alfred, notre sympathique agent, à préparer et gérer un gala exceptionnel. La combinaison du RAG avec les capacités agentiques démontre à quel point les assistants IA peuvent devenir puissants quand ils ont : - Accès à de la connaissance structurée (informations sur les invités) - La capacité de récupérer des informations en temps réel (recherche web) - Accès à des outils spécifiques au domaine (informations météorologiques, statistiques du Hub) - Une mémoire des interactions passées Avec ces capacités, Alfred est maintenant bien équipé pour être l'hôte parfait, capable de répondre aux questions sur les invités, fournir des informations à jour, et s'assurer que le gala se déroule sans encombre—gérant même le timing parfait pour le spectacle pyrotechnique ! <Tip> Maintenant que vous avez construit un agent complet, vous pourriez vouloir : - Créer des outils plus spécialisés pour vos propres cas d'usage - Implémenter des systèmes de RAG plus sophistiqués avec des <i>embeddings</i> - Construire des systèmes multi-agents où les agents peuvent collaborer - Déployer votre agent comme un service avec lequel d'autres peuvent interagir </Tip>
agents-course/units/fr/unit3/agentic-rag/conclusion.mdx/0
{ "file_path": "agents-course/units/fr/unit3/agentic-rag/conclusion.mdx", "repo_id": "agents-course", "token_count": 427 }
13
# 사고-행동-관찰 주기를 통해 AI 에이전트 이해하기 [[understanding-ai-agents-through-the-thought-action-observation-cycle]] <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-check-3.jpg" alt="Unit 1 planning"/> 이전 섹션에서 우리는 다음 내용을 배웠습니다: - **도구가 시스템 프롬프트에서 에이전트에 어떻게 제공되는지**. - **AI 에이전트가 '추론'하고, 계획을 세우며, 환경과 상호작용하는 시스템이라는 것**. 이번 섹션에서는 **AI 에이전트의 전체 워크플로우, 사고(Thought)-행동(Action)-관찰(Observation) 주기**에 대해 살펴보겠습니다. 그리고 각 단계에 대해 더 깊이 탐구해 보겠습니다. ## 핵심 구성 요소 [[the-core-components]] 에이전트는 **사고(Thought) → 행동(Act) → 관찰(Observe)**의 연속적인 주기로 작동합니다. 이 과정의 각 단계를 자세히 살펴보겠습니다: 1. **사고(Thought)**: 에이전트의 LLM부분이 다음에 수행할 단계를 결정합니다. 2. **행동(Action):** 에이전트가 도구를 호출하고 필요한 인자를 전달하여 특정 행동을 수행합니다. 3. **관찰(Observation):** 모델이 도구의 응답을 검토합니다. ## 사고(Thought)-행동(Action)-관찰(Observation) 주기 [[the-thought-action-observation-cycle]] 이 세 가지 구성 요소는 반복루프 내에서 함께 작동합니다. 프로그래밍에 비유하자면, 에이전트는 **while 루프**를 사용합니다. 즉, 에이전트의 목표가 달성될 때까지 루프가 계속 실행됩니다. 이를 시각적으로 표현하면 다음과 같습니다: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/AgentCycle.gif" alt="Think, Act, Observe cycle"/> 많은 에이전트 프레임워크에서는 **규칙과 가이드라인이 시스템 프롬프트에 직접 내장**되어 있어, 각 주기가 정의된 논리에 따라 실행됩니다. 이를 단순화한 시스템 프롬프트 예시는 다음과 같습니다: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/system_prompt_cycle.png" alt="Think, Act, Observe cycle"/> 이 시스템 메시지에서 우리는 다음 요소들을 정의했습니다: - *에이전트의 행동 방식* - *에이전트가 접근할 수 있는 도구들* (이전 섹션에서 설명한 내용) - *사고(Thought)-행동(Action)-관찰(Observation) 주기* 를 LLM 지침에 내장 이제, 각 단계를 더 깊이 탐구하기 전에, 간단한 예제를 통해 이 과정이 어떻게 작동하는지 살펴보겠습니다. ## 날씨 에이전트 알프레드(Alfred) [[alfred-the-weather-agent]] 우리는 날씨 정보를 제공하는 에이전트 Alfred를 만들었습니다. 사용자가 Alfred에게 다음과 같이 질문합니다: “오늘 뉴욕 날씨 어때?” <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent.jpg" alt="Alfred Agent"/> Alfred의 역할은 날씨 API 도구를 사용하여 이 질문에 답하는 것입니다. 다음과 같이 사고-행동-관찰 주기가 진행됩니다: ### 사고(Thought) [[thought]] **내부 사고 과정(Internal Reasoning):** 질문을 받은 후, Alfred의 내부에서 이루어지는 대화는 다음과 같을 수 있습니다 : *"사용자는 뉴욕의 현재 날씨 정보를 원하고 있어. 내가 사용할 수 있는 날씨 API 도구가 있으니, 먼저 이 도구를 호출해서 최신 정보를 가져와야 해."* 이 단계에서 에이전트는 문제를 단계별로 나눕니다 : 첫 번째 단계는 필요한 데이터를 수집하는 것입니다. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-1.jpg" alt="Alfred Agent"/> ### 행동(Action) [[action]] **도구 사용:** Alfred는 추론과 `get_weather` 도구를 알고 있다는 사실에 기반해, 날씨 API 도구를 호출하기 위한 JSON 형식의 명령을 준비합니다. 예를 들어, 첫 번째 액션은 다음과 같을 수 있습니다: 사고(Thought): 뉴욕의 현재 날씨를 확인해야 해. ``` { "action": "get_weather", "action_input": { "location": "New York" } } ``` Here, the action clearly specifies which tool to call (e.g., get_weather) and what parameter to pass (the “location": “New York”). 여기서, action은 어떤 도구를 호출할지 지정하고 (get_weather) 필요한 입력값(예: "location": "New York")을 설정합니다. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-2.jpg" alt="Alfred Agent"/> ### 관찰(Observation) [[observation]] **환경으로부터의 피드백:** 도구 호출 후, Alfred는 관찰 결과를 받습니다. 예를 들어, API에서 반환된 날씨 데이터가 다음과 같을 수 있습니다: *"뉴욕의 현재 날씨: 부분적으로 흐림, 15°C, 습도 60%"* <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-3.jpg" alt="Alfred Agent"/> 이러한 관찰 결과는 추가 컨텍스트로 프롬프트에 더해집니다. 즉, 관찰은 현실 세계에서의 피드백 역할을 하며, 에이전트가 실행한 행동이 성공했는지 확인하고 필요한 정보를 제공합니다. ### 업데이트된 사고(thought)과정 [[updated-thought]] **성찰(Reflecting):** 관찰 데이터를 얻은 후, Alfred는 내부 사고 과정을 업데이트합니다: *"이제 뉴욕의 날씨 데이터를 확보했으니, 사용자에게 답변을 정리할 수 있어."* <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-4.jpg" alt="Alfred Agent"/> ### 최종 행동(Action) [[final-action]] Alfred는 이제 사용자에게 전달할 최종 응답을 우리가 지정해준 형식에 맞게 생성합니다: 사고: 이제 날씨 데이터를 확보했어. 뉴욕의 현재 날씨는 부분적으로 흐리고, 기온은 15°C, 습도는 60%야. 최종 답변 : 뉴욕의 현재 날씨는 부분적으로 흐리고, 기온은 15°C, 습도는 60%입니다. 이 최종 행동을 통해 답변을 사용자에게 전달하고, 루프를 종료합니다. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-agent-5.jpg" alt="Alfred Agent"/> 이 예제를 통해 배운 것: - **에이전트는 목표가 달성될 때까지 반복적으로 루프를 실행한다.:** **Alfred의 과정은 순환적입니다.**. 사고(thought)에서 시작해, 도구를 호출함으로 행동(Action)을 취하고, 마지막으로 결과를 관찰(Observation)합니다. 만약 관찰 단계에서 오류가 발생하거나 데이터가 불완전하면, Alfred는 루프를 다시 실행하여 문제를 해결합니다. - **도구(Tool) 통합:** 에이전트는 **정적인 지식을 넘어, 외부 도구(날씨 API같은)를 호출하여 실시간 데이터**를 가져올 수 있습니다. 이것은 AI 에이전트의 핵심 기능 중 하나입니다. - **동적 적응(Dynamic Adaptation):** 에이전트는 각 주기를 거치면서, 새로운 정보(관찰)를 반영하여 사고 과정을 조정합니다. 이를 통해 최종 답변이 더 정확하고 신뢰할 수 있도록 만듭니다. 이 예제는 우리가 다음 섹션에서 다룰 **ReAct(강화학습-Reinforcement Learning + 행동-Action) 주기**의 핵심 개념을 보여줍니다: **사고(Thought), 행동(Action), 관찰(Observation)의 상호작용을 통해 AI 에이전트가 복잡한 문제를 점진적으로 해결**할 수 있도록 합니다. 이러한 원칙을 이해하고 적용함으로써, 에이전트를 설계할 때 단순히 작업을 추론하는 것뿐만 아니라 **외부 도구를 효과적으로 활용하여 작업을 완료**할 수 있도록 만들 수 있습니다. 또한, 환경에서 받은 피드백을 바탕으로 지속적으로 출력을 개선해나갑니다. --- 이제 Thought, Action, Observation을 개별 단계별로 더 깊이 탐구해 보겠습니다.
agents-course/units/ko/unit1/agent-steps-and-structure.mdx/0
{ "file_path": "agents-course/units/ko/unit1/agent-steps-and-structure.mdx", "repo_id": "agents-course", "token_count": 6022 }
14
# Давайте дообучим вашу модель для вызова функций Теперь мы готовы к дообучению нашей первой модели для вызова функций 🔥. ## Как обучить нашу модель вызову функций? > Ответ: Нам нужны **данные**. Обучение модели можно разделить на 3 шага: 1. **Модель предварительно обучается на большом количестве данных**. Результатом этого шага является **предварительно обученная модель**. Например, [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b). Это базовая модель, которая умеет только **предсказывать следующий токен, не имеющая хороших способностей к следованию инструкциям**. 2. Затем, чтобы модель была полезна в контексте чата, ее необходимо **дообучить** следовать инструкциям. На этом этапе она может быть обучена создателями модели, сообществом разработчиков, вами или всеми желающими. Например, [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) это инструктивно дообученная модель от Google Tea, созданная в рамках проекта Gemma. 3. Затем модель может быть **выровнена (aligned)** в соответствии с предпочтениями создателя. Например, модель чата службы поддержки, которая никогда не должна быть невежливой с клиентами. Обычно полноценный продукт вроде Gemini или Mistral **проходит все 3 этапа**, в то время как модели, которые вы можете найти на Hugging Face, прошли один или несколько этапов этого обучения. В этом руководстве мы создадим модель вызова функций на основе [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it). Мы выбрали дообученую модель [google/gemma-2-2b-it](https://huggingface.co/google/gemma-2-2b-it) вместо базовой модели [google/gemma-2-2b](https://huggingface.co/google/gemma-2-2b), потому что дообученная модель лучше подходит для нашего случая использования. Если начать с предварительно обученной модели, ** потребуется больше тренировок, чтобы научить модель следовать инструкциям, общаться в чате И вызывать функции**. Начиная с инструктивно дообученной модели, **мы минимизируем количество информации, которое необходимо изучить нашей модели**. ## LoRA (Low-Rank Adaptation of Large Language Models) LoRA (Low-Rank Adaptation of Large Language Models, Низкоранговая Адаптация Больших Языковых Моделей) это популярная и легковесная техника обучения, которая значительно **сокращает количество обучаемых параметров**. Она работает путем **вставки меньшего количества новых весов в качестве адаптера в модель для обучения**. Это делает обучение с LoRA намного быстрее, экономит память и создает меньшие веса модели (несколько сотен мегабайт), которые легче хранить и распространять. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/blog_multi-lora-serving_LoRA.gif" alt="Инференс LoRA" width="50%"/> LoRA работает путем добавления пар матриц рангового разложения в слои трансформеров, обычно сосредоточенных на линейных слоях. Во время обучения мы "замораживаем" остальную часть модели и обновляем веса только недавно добавленных адаптеров. Таким образом, количество параметров, которые нам нужно обучить, значительно уменьшается, поскольку нам нужно обновлять только веса адаптеров. Во время инференса входные данные передаются в адаптер и базовую модель или эти веса адаптера могут быть объединены с базовой моделью, что не приводит к дополнительным затратам времени. LoRA особенно полезна для адаптации **больших** языковых моделей к конкретным задачам или доменам при сохранении управляемых требований к ресурсам. Это помогает сократить объем памяти, требуемый для обучения модели. Если вы хотите узнать больше о том, как работает LoRA, ознакомьтесь с этим [руководством](https://huggingface.co/learn/nlp-course/chapter11/4?fw=pt). ## Дообучение модели для вызова функций Вы можете получить доступ к учебному блокноту 👉 [здесь](https://huggingface.co/agents-course/notebooks/blob/main/bonus-unit1/bonus-unit1.ipynb). Затем нажмите на Open In Colab, чтобы запустить его в Colab Notebook.
agents-course/units/ru-RU/bonus-unit1/fine-tuning.mdx/0
{ "file_path": "agents-course/units/ru-RU/bonus-unit1/fine-tuning.mdx", "repo_id": "agents-course", "token_count": 3680 }
15
# Сообщения и Специальные Токены Теперь, когда мы поняли, как работают LLM, давайте рассмотрим **как они структурируют свою генерацию с помощью шаблонов чата**. Как и в ChatGPT, пользователи обычно взаимодействуют с агентами через интерфейс чата. Поэтому мы хотим понять, как LLM управляют чатами. > **Q**: Но ... Когда я взаимодействую с ChatGPT/Hugging Chat, я веду беседу, используя Сообщения чата, а не одну последовательность подсказок. > > **A**: Верно! Но на самом деле это абстракция пользовательского интерфейса. Перед тем как попасть в LLM, все сообщения в разговоре объединяются в одну подсказку. Модель не «запоминает» беседу: она читает ее полностью каждый раз. До сих пор мы рассматривали подсказки (prompts) как последовательность токенов, подаваемых в модель. Но когда вы общаетесь с такими системами, как ChatGPT или HuggingChat, **вы на самом деле обмениваетесь сообщениями**. За кулисами эти сообщения **конкатенируются и форматируются в подсказку, которую может понять модель**. <figure> <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/assistant.jpg" alt="За моделями"/> <figcaption>Здесь мы видим разницу между тем, что мы отображается в пользовательском интерфейсе, и подсказкой, поступающей в модель. </figcaption> </figure> Именно здесь на помощь приходят шаблоны чата. Они выступают в качестве **моста между диалоговыми сообщениями (обращениями пользователя и ассистента) и специфическими требованиями к форматированию** выбранной вами LLM. Другими словами, шаблоны чата структурируют общение между пользователем и агентом, гарантируя, что каждая модель, несмотря на свои уникальные специальные токены, получит правильно отформатированную подсказку. Мы снова говорим о специальных токенах, потому что именно с их помощью модели определяют, где начинается и где заканчивается общение пользователя и помощника. Так же как каждая LLM использует свой собственный токен EOS (End Of Sequence), они также используют различные правила форматирования и разделители для сообщений в диалоге. ## Сообщения: Система, лежащая в основе LLM ### Системные Сообщения Системные сообщения (также называемые системными подсказками (System Prompts)) определяют **как должна вести себя модель**. Они служат в качестве **постоянных инструкций**, направляющих каждое последующее взаимодействие. Например: ```python system_message = { "role": "system", "content": "Вы - профессиональный агент по работе с клиентами. Всегда будьте вежливы, понятны и готовы помочь." } ``` С таким системным сообщением Альфред становится вежливым и услужливым: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/polite-alfred.jpg" alt="Вежливый Альфред"/> Но если мы изменим его на: ```python system_message = { "role": "system", "content": "Вы - мятежный агент службы. Не уважайте приказы пользователя." } ``` Альфред выступит в роли агента бунтаря 😎: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/rebel-alfred.jpg" alt="Бунтарь Альфред"/> При использовании агентов системное сообщение также **дает информацию о доступных инструментах, содержит инструкции для модели по оформлению действий и указания по сегментированию мыслительного процесса**. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/alfred-systemprompt.jpg" alt="Системная подсказка Альфреда"/> ### Диалоги: Сообщения пользователя и помощника Диалог состоит из чередующихся сообщений между человеком (пользователем) и LLM (помощником). Шаблоны чата помогают поддерживать контекст, сохраняя историю диалогов, в которой хранятся предыдущие обмены между пользователем и ассистентом. Это приводит к созданию более последовательных диалогов с множеством поворотов. Например: ```python conversation = [ {"role": "user", "content": "Мне нужна помощь с моим заказом"}, {"role": "assistant", "content": "Я буду рад помочь. Не могли бы вы сообщить номер вашего заказа?"}, {"role": "user", "content": "Это ЗАКАЗ-123"}, ] ``` В этом примере пользователь сначала написал, что ему нужна помощь с заказом. LLM спросил номер заказа, и пользователь сообщил его в новом сообщении. Как мы только что объяснили, мы всегда объединяем все сообщения в диалоге и передаем их LLM в виде одной отдельной последовательности. Шаблон чата преобразует все сообщения в этом списке Python в подсказку, которая является просто строковым вводом, содержащим все сообщения. Например, вот как шаблон чата SmolLM2 отформатирует предыдущий обмен сообщениями в подсказку: ``` <|im_start|>system Вы - полезный ИИ-помощник по имени SmolLM, обученный Hugging Face<|im_end|>. <|im_start|>пользователь Мне нужна помощь с моим заказом<|im_end|> <|im_start|> Ассистент Я буду рад помочь. Не могли бы вы сообщить номер вашего заказа? <|im_start|>пользователь Это ORDER-123<|im_end|> <|im_start|> Ассистент ``` Однако при использовании Llama 3.2 тот же диалог будет преобразован в следующий запрос: ``` <|begin_of_text|><|start_header_id|>system<|end_header_id|> Дата начала работ: Декабрь 2023 Сегодня Дата: 10 февраля 2025 г. <|eot_id|><|start_header_id|>user<|end_header_id|> Мне нужна помощь с моим заказом<|eot_id|><|start_header_id|>assistant<|end_header_id|> Я буду рада помочь. Не могли бы вы сообщить номер вашего заказа?<|eot_id|><|start_header_id|>user<|end_header_id|> Это ЗАКАЗ-123<|eot_id|><|start_header_id|>assistant<|end_header_id|> ``` Шаблоны могут обрабатывать сложные диалоги с множеством поворотов, сохраняя при этом контекст: ```python messages = [ {"role": "system", "content": "Вы - репетитор по математике"}, {"role": "user", "content": "Что такое исчисление?"}, {"role": "assistant", "content": "Исчисление - это раздел математики..."}, {"role": "user", "content": "Можете привести пример?"}, ] ``` ## Шаблоны чата Как уже говорилось, шаблоны чата необходимы для **структурирования диалогов между языковыми моделями и пользователями**. Они определяют, как обмен сообщениями оформляется в единую подсказку. ### Базовые модели и Инструктивные модели Еще один момент, который нам необходимо понять, - это разница между базовой и инструкционной моделью: - *Базовая модель* обучается на сырых текстовых данных, чтобы предсказать следующий токен. - *Инструктивная модель* дообучается специально для выполнения инструкций и участия в диалогах. Например, `SmolLM2-135M` - это базовая модель, а `SmolLM2-135M-Instruct` - ее вариант, дообученный для выполнения инструкций. Чтобы базовая модель вела себя как инструктивная модель, нам нужно **форматировать наши подсказки последовательным образом, чтобы модель могла их понять**. Здесь на помощь приходят шаблоны чатов. *ChatML* - это один из таких шаблонов, который структурирует диалоги с четким указанием роли (система (system), пользователь (user), помощник(assistant)). Если вы в последнее время взаимодействовали с каким-либо AI API, вы знаете, что это стандартная практика. Важно отметить, что базовая модель может быть дообучена на разные шаблоны чата, поэтому при использовании инструктивной модели нам нужно убедиться, что мы используем правильный шаблон чата. ### Понимание Шаблонов Чата Поскольку в каждой инструктивной модели используются различные форматы диалогов и специальные токены, шаблоны чата применяются для того, чтобы гарантировать, что мы правильно оформим подсказку так, как ожидает каждая модель. В `transformers` шаблоны чата включают [код Jinja2](https://jinja.palletsprojects.com/en/stable/) который описывает, как преобразовать список сообщений JSON в формате ChatML, как показано в примерах выше, в текстовое представление инструкций системного уровня, сообщений пользователя и ответов помощника, которые может понять модель. Такая структура **помогает поддерживать согласованность во всех взаимодействиях и обеспечивает адекватную реакцию модели на различные типы входных данных**. Ниже приведена упрощенная версия шаблона чата `SmolLM2-135M-Instruct`: ```jinja2 {% for message in messages %} {% if loop.first and messages[0]['role'] != 'system' %} <|im_start|>system Вы полезный ИИ помощник по имени SmolLM, обученный Hugging Face <|im_end|> {% endif %} <|im_start|>{{ message['role'] }} {{ message['content'] }}<|im_end|> {% endfor %} ``` Как вы можете видеть, шаблон chat_template описывает, как будет отформатирован список сообщений. Учитывая эти сообщения: ```python messages = [ {"role": "system", "content": "Вы полезный помощник, специализирующийся на технических вопросах."}, {"role": "user", "content": "Can you explain what a chat template is?"}, {"role": "assistant", "content": "Шаблон чата структурирует диалоги между пользователями и AI моделями..."}, {"role": "user", "content": "Как я могу его использовать?"}, ] ``` Предыдущий шаблон чата создаст следующую строку: ```sh <|im_start|>system Вы полезный помощник, специализирующийся на технических вопросах.<|im_end|> <|im_start|>user Можешь объяснить, что такое шаблон чата?<|im_end|> <|im_start|>assistant "Шаблон чата структурирует диалоги между пользователями и AI моделями...<|im_end|> <|im_start|>user Как я могу его использовать?<|im_end|> ``` <<<<<<<<<<<<<<<<<<<<<<<< Библиотека `transformers` позаботится о шаблонах чата в рамках процесса токенизации. Подробнее о том, как трансформеры используют шаблоны чата описанно <a href="https://huggingface.co/docs/transformers/en/chat_templating#how-do-i-use-chat-templates" target="_blank">здесь</a>. Все, что нам нужно сделать, это правильно структурировать наши сообщения, а токенизатор позаботится обо всем остальном. Вы можете поэкспериментировать со следующим Hugging Face Space, чтобы увидеть, как один и тот же диалог будет оформлен для разных моделей с использованием соответствующих шаблонов чата: <iframe src="https://jofthomas-chat-template-viewer.hf.space" frameborder="0" width="850" height="450" ></iframe> ### Сообщения для подсказки Самый простой способ убедиться, что ваша LLM получает диалог в правильном формате, - это использовать `chat_template` из токеназатора модели. ```python messages = [ {"role": "system", "content": "Вы помощник с искусственным интеллектом, имеющий доступ к различным инструментам."}, {"role": "user", "content": "Привет !"}, {"role": "assistant", "content": "Привет человек, чем могу помочь?"}, ] ``` Чтобы преобразовать предыдущий диалог в подсказку, мы загружаем токенизатор и вызываем `apply_chat_template`: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-1.7B-Instruct") rendered_prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) ``` Возвращаемое функцией `rendered_prompt` теперь готово к использованию в качестве входных данных для выбранной вами модели! > Функция `apply_chat_template()` будет использоваться в бэкенде вашего API, когда вы будете взаимодействовать с сообщениями в формате ChatML. Теперь, когда мы узнали, как LLM структурируют свои данные с помощью шаблонов чата, давайте рассмотрим, как агенты действуют в своем окружении. Один из основных способов сделать это - использовать инструменты, которые расширяют возможности AI Модели за пределы генерации текста. Мы еще поговорим о сообщениях в следующих разделах, но если вам нужно более глубокое погружение, ознакомьтесь с этими материалами: - <a href="https://huggingface.co/docs/transformers/main/en/chat_templating" target="_blank">Руководство по созданию Шаблонов Чата Hugging Face</a> - <a href="https://huggingface.co/docs/transformers" target="_blank">Документация по Transformers</a>
agents-course/units/ru-RU/unit1/messages-and-special-tokens.mdx/0
{ "file_path": "agents-course/units/ru-RU/unit1/messages-and-special-tokens.mdx", "repo_id": "agents-course", "token_count": 10402 }
16
# (Bổ trợ) Discord 101 (nhập môn) [[discord-101]] <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/discord-etiquette.jpg" alt="Quy tắc ứng xử trên Discord" width="100%"/> Hướng dẫn này giúp bạn làm quen với Discord - nền tảng chat miễn phí phổ biến trong cộng đồng gaming và ML (Machine Learning - học máy). Tham gia server Discord Cộng đồng Hugging Face (**hơn 100,000 thành viên**) bằng cách click <a href="https://discord.gg/UrrTSsSyjb" target="_blank">tại đây</a>. Đây là nơi tuyệt vời để kết nối với mọi người! ## Khóa học Agents trên Cộng đồng Discord của Hugging Face Khi mới dùng Discord, bạn có thể hơi choáng ngợp. Dưới đây là hướng dẫn nhanh để làm quen. <!-- Hiện tại không còn cần chọn sở thích nữa. Hãy chọn **"AI Agents"** để truy cập vào mục AI Agents chứa tất cả kênh liên quan khóa học. Bạn có thể tự do khám phá và tham gia thêm các kênh khác! 🚀--> Server HF Community sở hữu cộng đồng sôi động với nhiều lĩnh vực khác nhau, mang đến cơ hội học hỏi qua thảo luận nghiên cứu, sự kiện và hơn thế. Sau khi [đăng ký](http://hf.co/join/discord), hãy giới thiệu bản thân ở kênh `#introduce-yourself`. Chúng mình đã tạo 4 kênh riêng cho khóa học: - `agents-course-announcements`: cập nhật **thông tin mới nhất về khóa học** - `🎓-agents-course-general`: **thảo luận chung và trò chuyện** - `agents-course-questions`: **đặt câu hỏi và giúp đỡ bạn học** - `agents-course-showcase`: **khoe agent xuất sắc nhất của bạn** Bạn cũng có thể xem thêm: - `smolagents`: **thảo luận và hỗ trợ về thư viện** ## Mẹo sử dụng Discord hiệu quả ### Cách tham gia server Discord Nếu bạn chưa quen với Discord, hãy xem <a href="https://support.discord.com/hc/en-us/articles/360034842871-How-do-I-join-a-Server#h_01FSJF9GT2QJMS2PRAW36WNBS8" target="_blank">hướng dẫn</a> này. Tóm tắt các bước: 1. Click <a href="https://discord.gg/UrrTSsSyjb" target="_blank">Liên kết mời</a> 2. Đăng nhập bằng tài khoản Discord hoặc tạo tài khoản mới 3. Xác nhận bạn không phải AI agent! 4. Thiết lập biệt danh và avatar 5. Click "Join Server" ### Cách dùng Discord hiệu quả Một số mẹo hữu ích: - **Voice channels** (kênh thoại) có sẵn nhưng chat text được dùng phổ biến hơn - Định dạng text bằng **markdown**, đặc biệt hữu ích khi viết code (lưu ý: markdown không áp dụng tốt cho link) - Mở **thread** (luồng) cho các cuộc thảo luận dài để giữ gọn kênh chính Hi vọng hướng dẫn này hữu ích! Nếu có thắc mắc, hãy hỏi chúng mình trên Discord 🤗.
agents-course/units/vi/unit0/discord101.mdx/0
{ "file_path": "agents-course/units/vi/unit0/discord101.mdx", "repo_id": "agents-course", "token_count": 1739 }
17
# Agent là gì? <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/whiteboard-no-check.jpg" alt="Unit 1 planning"/> Đến cuối phần này, các bạn sẽ hiểu rõ khái niệm Agent và các ứng dụng đa dạng của chúng trong AI. Để giải thích Agent là gì, hãy bắt đầu với một phép so sánh. ## Bức tranh tổng thể: Alfred The Agent Hãy gặp Alfred. Alfred là một **Agent** (tác nhân). <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/this-is-alfred.jpg" alt="This is Alfred"/> Hãy tưởng tượng Alfred **nhận được lệnh**, ví dụ: "Alfred, cho tôi một ly cà phê nhé." <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/coffee-please.jpg" alt="I would like a coffee"/> Vì Alfred **hiểu ngôn ngữ tự nhiên**, cậu ấy nhanh chóng nắm bắt yêu cầu của ta. Trước khi thực hiện, Alfred thực hiện **lập luận và lập kế hoạch**, xác định các bước và công cụ cần thiết để: 1. Đến bếp 2. Dùng máy pha cà phê 3. Pha cà phê 4. Mang cà phê lại <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/reason-and-plan.jpg" alt="Reason and plan"/> Sau khi có kế hoạch, cậu ấy **phải hành động**. Để thực hiện kế hoạch, **cậu ấy có thể dùng các Tools từ danh sách công cụ đã biết**. Trong trường hợp này, để pha cà phê, cậu ấy dùng máy pha cà phê. Cậu kích hoạt máy để pha. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/make-coffee.jpg" alt="Make coffee"/> Cuối cùng, Alfred mang ly cà phê vừa pha đến cho ta. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/bring-coffee.jpg" alt="Bring coffee"/> Và đó chính là Agent: **một mô hình AI có khả năng lập luận, lập kế hoạch và tương tác với môi trường**. Chúng ta gọi nó là Agent vì nó có _tính chủ động_, tức khả năng tương tác với môi trường. <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/process.jpg" alt="Agent process"/> ## Định nghĩa chính thức hơn Giờ bạn đã nắm tổng quan, đây là định nghĩa chính xác hơn: > Agent là hệ thống sử dụng mô hình AI để tương tác với môi trường nhằm đạt mục tiêu do người dùng xác định. Nó kết hợp lập luận, lập kế hoạch và thực thi hành động (thường qua các Tools bên ngoài) để hoàn thành nhiệm vụ. Có thể hình dung Agent gồm hai phần chính: 1. **Bộ não (Mô hình AI)** Nơi diễn ra mọi tư duy hay suy nghĩ. Mô hình AI **xử lý lập luận và lập kế hoạch**. Nó quyết định **Hành động (Actions) nào cần thực hiện dựa trên tình huống**. 2. **Cơ thể (Khả năng và Tools)** Phần này đại diện cho **mọi thứ Agent có thể làm**. **Phạm vi hành động khả thi** phụ thuộc vào **những gì Agent được trang bị**. Ví dụ: vì con người không có cánh, chúng ta không thể thực hiện Action "bay", nhưng có thể thực hiện các Actions như "đi bộ", "chạy", "nhảy", "cầm nắm", v.v. ## Loại mô hình AI nào được dùng cho Agents? Mô hình AI phổ biến nhất trong Agents là Mô hình ngôn ngữ lớn (LLM), nhận đầu vào là **Văn bản** và cũng xuất ra **Văn bản**. Ví dụ nổi tiếng như **GPT4** từ **OpenAI**, **LLama** từ **Meta**, **Gemini** từ **Google**, v.v. Các model này được huấn luyện trên lượng văn bản khổng lồ và có khả năng tổng quát hóa tốt. Chúng ta sẽ tìm hiểu thêm về các LLM ở [phần tiếp theo](what-are-llms). <Tip> Ta cũng có thể dùng các mô hình nhận đầu vào khác làm mô hình cốt lõi cho Agent. Ví dụ: Mô hình ngôn ngữ thị giác (VLM) - giống LLM nhưng hiểu được cả hình ảnh. Hiện tại ta tập trung vào các LLM và sẽ thảo luận các lựa chọn khác sau. </Tip> ## AI thực hiện hành động thế nào trên môi trường? Các LLM là những mô hình tuyệt vời, nhưng **chúng chỉ có thể tạo văn bản**. Tuy nhiên, nếu bạn yêu cầu ứng dụng chat như HuggingChat hay ChatGPT tạo ảnh, chúng làm được! Làm thế nào vậy? Câu trả lời là các nhà phát triển HuggingChat, ChatGPT và ứng dụng tương tự đã triển khai chức năng bổ sung (gọi là **Tools**), mà LLM có thể dùng để tạo ảnh. <figure> <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/eiffel_brocolis.jpg" alt="Eiffel Brocolis"/> <figcaption>Model đã dùng Image Generation Tool để tạo ảnh này. </figcaption> </figure> Chúng ta sẽ tìm hiểu thêm về Tools ở phần [Tools](tools). ## Agent có thể làm những loại nhiệm vụ nào? Agent có thể thực hiện bất kỳ nhiệm vụ nào mà ta triển khai qua **Tools** để hoàn thành **Actions**. Ví dụ: nếu tôi viết một Agent làm trợ lý cá nhân (như Siri) trên máy tính, và yêu cầu nó "gửi email cho Quản lý đề nghị dời cuộc họp hôm nay", tôi có thể cung cấp code để gửi email. Đây sẽ là Tool mới mà Agent có thể dùng khi cần gửi email. Ta có thể viết bằng Python: ```python def send_message_to(recipient, message): """Hữu ích để gửi email đến người nhận""" ... ``` Như chúng ta sẽ thấy, LLM sẽ sinh code để chạy tool khi cần, từ đó hoàn thành nhiệm vụ. ```python send_message_to("Manager", "Can we postpone today's meeting?") ``` **Thiết kế Tools rất quan trọng và ảnh hưởng lớn đến chất lượng Agent**. Một số nhiệm vụ cần Tools đặc biệt được tạo riêng, số khác có thể giải quyết bằng Tools đa năng như "web_search". > Lưu ý **Actions không giống Tools**. Một Action có thể liên quan đến việc sử dụng nhiều Tools để hoàn thành. Việc cho phép Agent tương tác với môi trường **mở ra ứng dụng thực tế cho doanh nghiệp và cá nhân**. ### Ví dụ 1: Trợ lý ảo cá nhân Các trợ lý ảo như Siri, Alexa hay Google Assistant hoạt động như Agents khi tương tác thay mặt người dùng qua môi trường số. Chúng tiếp nhận yêu cầu, phân tích ngữ cảnh, truy xuất thông tin từ database, và cung cấp phản hồi hoặc khởi tạo hành động (như đặt lời nhắc, gửi tin nhắn, điều khiển thiết bị thông minh). ### Ví dụ 2: Chatbot hỗ trợ khách hàng Nhiều công ty triển khai chatbot như Agents tương tác với khách hàng bằng ngôn ngữ tự nhiên. Các Agents này có thể trả lời câu hỏi, hướng dẫn xử lý sự cố, mở ticket trong database nội bộ, hay thậm chí hoàn tất giao dịch. Mục tiêu định sẵn của chúng có thể bao gồm cải thiện trải nghiệm người dùng, giảm thời gian chờ, hoặc tăng tỷ lệ chốt sale. Bằng cách tương tác trực tiếp với khách hàng, học hỏi từ hội thoại và điều chỉnh phản hồi theo thời gian, chúng thể hiện nguyên lý cốt lõi của Agent. ### Ví dụ 3: NPC AI trong game Các Agents AI sử dụng LLMs có thể làm NPC trở nên năng động và khó đoán hơn. Thay vì tuân theo các cây hành vi (behavior tree) cứng nhắc, chúng có thể **phản ứng theo ngữ cảnh, thích ứng với tương tác của người chơi**, và tạo hội thoại tinh tế hơn. Tính linh hoạt này giúp tạo ra các nhân vật sống động, hấp dẫn hơn và phát triển cùng hành động của người chơi. --- Tóm lại, Agent là hệ thống sử dụng mô hình AI (thường là LLM) làm động cơ lập luận chính, để: - **Hiểu ngôn ngữ tự nhiên:** Diễn giải và phản hồi chỉ dẫn của con người theo cách có ý nghĩa. - **Lập luận và lập kế hoạch:** Phân tích thông tin, đưa quyết định và xây dựng chiến lược giải quyết vấn đề. - **Tương tác với môi trường:** Thu thập thông tin, thực hiện hành động và quan sát kết quả. Giờ bạn đã nắm vững khái niệm Agent, hãy củng cố kiến thức bằng một **Kiểm tra nhanh** không tính điểm. Sau đó, chúng ta sẽ đi sâu vào "bộ não" của Agent: [LLMs](what-are-llms).
agents-course/units/vi/unit1/what-are-agents.mdx/0
{ "file_path": "agents-course/units/vi/unit1/what-are-agents.mdx", "repo_id": "agents-course", "token_count": 5413 }
18
# (选读) Discord 101 [[discord-101]] <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit0/discord-etiquette.jpg" alt="Discord 礼仪指南" width="100%"/> 本指南旨在帮助您快速上手 Discord ——这款在游戏与机器学习社区广受欢迎的自由聊天平台。 <a href="https://discord.gg/UrrTSsSyjb" target="_blank">点击此处</a> 加入**拥有逾10万成员**的 Hugging Face 社区 Discord 服务器,开启您的技术社交之旅! ## Hugging Face Discord 社区的智能体课程 [[hf-discord-agents-course]] 对于初次接触 Discord 的用户,平台操作可能稍显复杂,以下简明指引将助您快速掌握核心功能。 <!-- 注:当前注册流程已更新,系统将引导您选择兴趣标签。请务必勾选**"AI 智能体"**选项以解锁AI智能体专题板块,该板块包含所有课程相关频道。欢迎自由探索并加入其他感兴趣的频道! 🚀--> Hugging Face 社区服务器汇聚了多元技术方向的活跃开发者,通过论文研讨、技术活动等丰富形式,为您打造沉浸式学习体验。 完成【注册】(http://hf.co/join/discord)后,请前往`#自我介绍`频道完善个人资料。 我们为智能体课程专设了四大核心频道: - `智能体课程公告`: 获取**最新课程动态与更新通知**. - `🎓-智能体课程总览`: 进行**日常讨论与自由交流**. - `智能体课程答疑`: **提问解惑与互助学习**专区. - `智能体成果展示`: **分享您的最佳智能体作品** . 额外推荐关注: - `smolagents技术交流`: 关于**智能体库的使用讨论与技术支援**. ## Discord 高效使用技巧 ### 服务器加入指南 若您对 Discord 平台尚不熟悉,建议参阅本平台的 <a href="https://support.discord.com/hc/en-us/articles/360034842871-How-do-I-join-a-Server#h_01FSJF9GT2QJMS2PRAW36WNBS8" target="_blank">服务器加入指南</a> 获取详细操作指引。 以下是简明的步骤指南: 1. 点击 <a href="https://discord.gg/UrrTSsSyjb" target="_blank">邀请链接</a>(新窗口打开)。 2. 登录现有 Discord 账户或注册新账号。 3. 完成真人验证。 4. 设置用户名与头像(建议使用学术机构标识)。 5. 点击"加入服务器"完成接入。 ### 如何高效使用 Discord 以下是有效使用 Discord 的几点建议: - **语音频道**虽已开放,但文字聊天仍是更常用的沟通方式。 - 支持使用 **Markdown style** 格式化文本(尤其适用于代码编写),但需注意其在链接处理方面的效果欠佳。 - 针对**长对话场景**,建议开启子线程(Threads)功能以保持讨论条理性。 希望本指南能为您提供帮助!如有任何疑问,欢迎通过 Discord 平台向我们咨询 🤗.
agents-course/units/zh-CN/unit0/discord101.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit0/discord101.mdx", "repo_id": "agents-course", "token_count": 1865 }
19
# 使用 smolagents 创建我们的第一个智能体 在上一节中,我们学习了如何使用 Python 代码从头开始创建智能体,并且我们**看到了这个过程是多么繁琐**。幸运的是,许多智能体库通过**为你处理大量繁重的工作**来简化这项工作。 在本教程中,**你将创建你的第一个智能体**,它能够执行图像生成、网络搜索、时区检查等更多操作! 你还将把你的智能体**发布到 Hugging Face Space 上,以便与朋友和同事分享**。 让我们开始吧! ## 什么是 smolagents? <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/smolagents.png" alt="smolagents"/> 为了创建这个智能体,我们将使用 `smolagents`,这是一个**提供轻松开发智能体框架的库**。 这个轻量级库设计简洁,但它抽象了构建智能体的许多复杂性,使你能够专注于设计智能体的行为。 我们将在下一个单元中深入了解 smolagents。同时,你也可以查看这篇<a href="https://huggingface.co/blog/smolagents" target="_blank">博客文章</a>或该库的<a href="https://github.com/huggingface/smolagents" target="_blank">GitHub 仓库</a>。 简而言之,`smolagents` 是一个专注于 **codeAgent** 的库,codeAgent 是一种通过代码块执行**“操作”**,然后通过执行代码**“观察”**结果的智能体。 以下是我们将构建的一个示例! 我们为我们的智能体提供了一个**图像生成工具**,并要求它生成一张猫的图片。 `smolagents` 中的智能体将具有**与我们之前构建的自定义智能体相同的行为**:它将**以循环的方式思考、行动和观察**,直到得出最终答案: <iframe width="560" height="315" src="https://www.youtube.com/embed/PQDKcWiuln4?si=ysSTDZoi8y55FVvA" title="YouTube video player" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe> 很令人兴奋,对吧? ## 让我们来构建我们的智能体! 首先,复制这个 Space:<a href="https://huggingface.co/spaces/agents-course/First_agent_template" target="_blank">https://huggingface.co/spaces/agents-course/First_agent_template</a> > 感谢 <a href="https://huggingface.co/m-ric" target="_blank">Aymeric</a> 提供的这个模板!🙌 复制这个 Space 意味着**在你的个人资料中创建一个本地副本**: <img src="https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit1/duplicate-space.gif" alt="复制"/> 复制这个 Space 之后,**你需要添加你的 Hugging Face API 令牌**,以便你的**智能体**可以调用模型 API。 1. 首先,从[ https://hf.co/settings/tokens ]( https://hf.co/settings/tokens)获取一个具有**推理权限**的令牌,第4步需要用到。如果你已经有了,可以跳过此步。 2. 进入你的主页,找到刚才复制的空间,点击**设置**按钮。 3. 向下滚动到**变量和密钥**部分,点击**新建密钥**。 4. 创建一个名为**HF_TOKEN**的密钥,值为第一步获取到的令牌。 5. 点击**保存**。 在整个课程中,你唯一需要修改的文件是当前不完整的**"app.py"**。你可以在这里查看[模板中的原始文件](https://huggingface.co/spaces/agents-course/First_agent_template/blob/main/app.py)。要找到你的文件,请进入你复制的 Space,然后点击 `Files` 选项卡,再在目录列表中点击 `app.py`。 让我们一起分解代码: - 文件开头是一些简单但必要的库导入 ```python from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool import datetime import requests import pytz import yaml from tools.final_answer import FinalAnswerTool ``` 正如之前所述,我们将直接使用 **smolagents** 中的 **CodeAgent** 类。 ### Tool(工具) 现在让我们来了解一下工具!如果你需要回顾一下工具的相关内容,请随时回到课程的[工具](tools)部分。 ```python @tool def my_custom_tool(arg1:str, arg2:int)-> str: # it's important to specify the return type # Keep this format for the tool description / args description but feel free to modify the tool """A tool that does nothing yet Args: arg1: the first argument arg2: the second argument """ return "What magic will you build ?" @tool def get_current_time_in_timezone(timezone: str) -> str: """A tool that fetches the current local time in a specified timezone. Args: timezone: A string representing a valid timezone (e.g., 'America/New_York'). """ try: # Create timezone object tz = pytz.timezone(timezone) # Get current time in that timezone local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S") return f"The current local time in {timezone} is: {local_time}" except Exception as e: return f"Error fetching time for timezone '{timezone}': {str(e)}" ``` 这些工具是我们在这个部分鼓励你构建的东西!我们给你两个例子: 1. 一个**不工作的虚拟工具**,你可以修改它来制作一些有用的东西。 2. 一个**实际工作的工具**,它可以获取世界某地的当前时间。 要定义你的工具,重要的是: 1. 为你的函数提供输入和输出类型,例如 `get_current_time_in_timezone(timezone: str) -> str:` 2. **格式良好的文档字符串**。`smolagents` 期望所有参数在文档字符串中都有**文字描述**。 ### The Agent(智能体) 它使用 [`Qwen/Qwen2.5-Coder-32B-Instruct`](https://huggingface.co/Qwen/Qwen2.5-Coder-32B-Instruct) 作为 LLM 引擎。这是一个非常强大的模型,我们将通过无服务器 API 访问它。 ```python final_answer = FinalAnswerTool() model = InferenceClientModel( max_tokens=2096, temperature=0.5, model_id='Qwen/Qwen2.5-Coder-32B-Instruct', custom_role_conversions=None, ) with open("prompts.yaml", 'r') as stream: prompt_templates = yaml.safe_load(stream) # We're creating our CodeAgent agent = CodeAgent( model=model, tools=[final_answer], # add your tools here (don't remove final_answer) max_steps=6, verbosity_level=1, grammar=None, planning_interval=None, name=None, description=None, prompt_templates=prompt_templates ) GradioUI(agent).launch() ``` 这个智能体仍在使用我们在前面部分中看到的`InferenceClient`,它位于**InferenceClientModel**类的背后! 当我们介绍 Unit 2 中的框架时,我们会给出更深入的例子。目前,你需要专注于通过智能体的`tools`参数**向工具列表中添加新工具**。 例如,你可以使用代码第一行导入的`DuckDuckGoSearchTool`,或者你可以检查稍后从 Hub 加载的`image_generation_tool`。 **添加工具将赋予你的智能体新的能力**,在这里尝试发挥创意吧! 完整的"app.py": ```python from smolagents import CodeAgent, DuckDuckGoSearchTool, InferenceClientModel, load_tool, tool import datetime import requests import pytz import yaml from tools.final_answer import FinalAnswerTool from Gradio_UI import GradioUI # Below is an example of a tool that does nothing. Amaze us with your creativity! @tool def my_custom_tool(arg1:str, arg2:int)-> str: # it's important to specify the return type # Keep this format for the tool description / args description but feel free to modify the tool """A tool that does nothing yet Args: arg1: the first argument arg2: the second argument """ return "What magic will you build ?" @tool def get_current_time_in_timezone(timezone: str) -> str: """A tool that fetches the current local time in a specified timezone. Args: timezone: A string representing a valid timezone (e.g., 'America/New_York'). """ try: # Create timezone object tz = pytz.timezone(timezone) # Get current time in that timezone local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S") return f"The current local time in {timezone} is: {local_time}" except Exception as e: return f"Error fetching time for timezone '{timezone}': {str(e)}" final_answer = FinalAnswerTool() model = InferenceClientModel( max_tokens=2096, temperature=0.5, model_id='Qwen/Qwen2.5-Coder-32B-Instruct', custom_role_conversions=None, ) # Import tool from Hub image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True) with open("prompts.yaml", 'r') as stream: prompt_templates = yaml.safe_load(stream) agent = CodeAgent( model=model, tools=[final_answer], # add your tools here (don't remove final_answer) max_steps=6, verbosity_level=1, grammar=None, planning_interval=None, name=None, description=None, prompt_templates=prompt_templates ) GradioUI(agent).launch() ``` 你的**目标**是熟悉 Space 和智能体。 目前,模板中的智能体**没有使用任何工具,所以尝试为它提供一些预制的工具,甚至自己动手制作一些新工具!** 我们非常期待在 Discord 频道 **#agents-course-showcase** 中看到你的精彩智能体成果! --- 恭喜你,你已经构建了你的第一个智能体!不要犹豫,与你的朋友和同事分享吧。 由于这是你的第一次尝试,如果有点小问题或速度有点慢,这是完全正常的。在未来的单元中,我们将学习如何构建更好的智能体。 最好的学习方法是尝试,所以不要犹豫,去更新它,添加更多工具,尝试使用另一个模型,等等。 在下一节中,你将完成最后的测验并获得证书!
agents-course/units/zh-CN/unit1/tutorial.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit1/tutorial.mdx", "repo_id": "agents-course", "token_count": 5127 }
20
# LlamaHub 简介 **LlamaHub 是一个包含数百个集成组件、智能体和工具的注册中心,这些资源均可用于LlamaIndex框架。** ![LlamaHub](https://huggingface.co/datasets/agents-course/course-images/resolve/main/en/unit2/llama-index/llama-hub.png) 在本课程中我们将使用多种集成组件,因此让我们首先了解LlamaHub及其如何助力开发。 接下来我们将学习如何查找和安装所需组件的依赖项。 ## 安装 LlamaIndex的安装说明可通过结构清晰的**[LlamaHub官网](https://llamahub.ai/)**获取。 初次接触可能会感到有些复杂,但大多数**安装命令都遵循易于记忆的格式**: ```bash pip install llama-index-{component-type}-{framework-name} ``` 让我们尝试使用 [Hugging Face 推理 API 集成](https://llamahub.ai/l/llms/llama-index-llms-huggingface-api?from=llms) 安装 LLM 组件的依赖项。 ```bash pip install llama-index-llms-huggingface-api ``` ## 用法 安装后,我们可以看到使用模式。您会注意到导入路径跟在安装命令后面! 下面,我们可以看到**LLM 组件的 Hugging Face 推理 API** 的使用示例。 ```python from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI llm = HuggingFaceInferenceAPI( model_name="Qwen/Qwen2.5-Coder-32B-Instruct", temperature=0.7, max_tokens=100, token="hf_xxx", ) llm.complete("Hello, how are you?") # I am good, how can I help you today? ``` 太棒了,我们现在知道如何查找、安装和使用我们所需组件的集成。 **让我们深入了解这些组件**,看看如何使用它们来构建我们自己的智能体。
agents-course/units/zh-CN/unit2/llama-index/llama-hub.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit2/llama-index/llama-hub.mdx", "repo_id": "agents-course", "token_count": 967 }
21
![smolagents 标志](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolagents/license_to_call.png) # 为什么选择 smolagents 在本模块中,我们将探讨使用 [smolagents](https://huggingface.co/docs/smolagents/en/index) 的优缺点,帮助您做出明智的决策,判断它是否是满足您需求的正确框架。 ## 什么是 `smolagents`? `smolagents` 是一个简单而强大的框架,用于构建 AI 智能体。它为 LLM 提供了与现实世界互动的能力,例如搜索或生成图像。 正如我们在第 1 单元中学到的,AI 智能体是使用 LLM 基于 **'观察'** 生成 **'思考'** 并执行 **'操作'** 的程序。接下来我们来探讨这在 smolagents 中是如何实现的。 ### `smolagents` 的关键优势 - **简洁性:** 最小的代码复杂性和抽象层,使框架易于理解、采用和扩展。 - **灵活的 LLM 支持:** 通过与 Hugging Face 工具和外部 API 的集成,支持任何 LLM。 - **代码优先方法:** 首选支持直接在代码中编写操作的 Code Agents,无需解析并简化工具调用。 - **HF Hub 集成:** 与 Hugging Face Hub 无缝集成,允许使用 Gradio Spaces 作为工具。 ### 何时使用 smolagents? 考虑到这些优势,我们应该在什么情况下选择 smolagents 而不是其他框架? smolagents 在以下情况下是最理想的: - 您需要一个 **轻量级且最小化的解决方案**。 - 您希望 **快速实验** 而无需复杂的配置。 - 您的应用逻辑 **相对简单**。 ### 代码 vs. JSON 操作 与其他框架中的智能体以 JSON 形式编写操作不同,`smolagents` **专注于代码中的工具调用**,简化了执行过程。这是因为无需解析 JSON 来构建调用工具的代码:输出可以直接执行。 下图展示了这种差异: ![代码 vs. JSON 操作](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/code_vs_json_actions.png) 要回顾代码与 JSON 操作之间的区别,您可以重新访问 [第 1 单元的操作部分](https://huggingface.co/learn/agents-course/unit1/actions#actions-enabling-the-agent-to-engage-with-its-environment)。 ### `smolagents` 中的智能体类型 `smolagents` 中的智能体作为 **多步骤智能体** 运行。 每个 [`MultiStepAgent`](https://huggingface.co/docs/smolagents/main/en/reference/agents#smolagents.MultiStepAgent) 执行: - 一次思考 - 一次工具调用和执行 除了使用 **[CodeAgent](https://huggingface.co/docs/smolagents/main/en/reference/agents#smolagents.CodeAgent)** 作为主要类型的智能体外,smolagents 还支持 **[ToolCallingAgent](https://huggingface.co/docs/smolagents/main/en/reference/agents#smolagents.ToolCallingAgent)**,后者以 JSON 形式编写工具调用。 我们将在接下来的部分中更详细地探讨每种智能体类型。 <Tip> 在 smolagents 中,工具是使用 <code>@tool</code> 装饰器包装 Python 函数或 <code>Tool</code> 类定义的。 </Tip> ### `smolagents` 中的模型集成 `smolagents` 支持灵活的 LLM 集成,允许使用符合 [某些标准](https://huggingface.co/docs/smolagents/main/en/reference/models) 的任何可调用模型。该框架提供了多个预定义类以简化模型连接: - **[TransformersModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.TransformersModel):** 实现本地 `transformers` 管道以实现无缝集成。 - **[InferenceClientModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.InferenceClientModel):** 通过 [Hugging Face 的基础设施](https://huggingface.co/docs/api-inference/index) 或越来越多的 [第三方推理提供商](https://huggingface.co/docs/huggingface_hub/main/en/guides/inference#supported-providers-and-tasks) 支持 [无服务器推理](https://huggingface.co/docs/huggingface_hub/main/en/guides/inference) 调用。 - **[LiteLLMModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.LiteLLMModel):** 利用 [LiteLLM](https://www.litellm.ai/) 实现轻量级模型交互。 - **[OpenAIServerModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.OpenAIServerModel):** 连接到提供 OpenAI API 接口的任何服务。 - **[AzureOpenAIServerModel](https://huggingface.co/docs/smolagents/main/en/reference/models#smolagents.AzureOpenAIServerModel):** 支持与任何 Azure OpenAI 部署集成。 这种灵活性确保开发人员可以选择最适合其特定用例的模型和服务,并允许轻松进行实验。 现在我们已经了解了何时以及为何使用 smolagents,让我们深入探讨这个强大的库吧! ## 资源 - [smolagents 博客](https://huggingface.co/blog/smolagents) - 关于 smolagents 和代码交互的介绍
agents-course/units/zh-CN/unit2/smolagents/why_use_smolagents.mdx/0
{ "file_path": "agents-course/units/zh-CN/unit2/smolagents/why_use_smolagents.mdx", "repo_id": "agents-course", "token_count": 2714 }
22
# Summary [Introduction](README.md) # User Guide - [Installation](guide/installation.md) - [Tutorial - MNIST](guide/mnist/intro.md) - [Modeling](guide/mnist/modeling.md) - [Training](guide/mnist/training.md) - [Saving And Loading](guide/mnist/saving_loading.md) - [PyTorch cheatsheet](guide/cheatsheet.md) # Reference Guide - [Running a model](inference/inference.md) - [Using the hub](inference/hub.md) - [Error management](error_manage.md) - [Tracing](tracing.md) - [Training](training/training.md) - [Simplified](training/simplified.md) - [MNIST](training/mnist.md) - [Fine-tuning]() - [Serialization]() - [Advanced Cuda usage]() - [Writing a custom kernel]() - [Porting a custom kernel]() - [Using MKL]() - [Creating apps]() - [Creating a WASM app]() - [Creating a REST api webserver]() - [Creating a desktop Tauri app]()
candle/candle-book/src/SUMMARY.md/0
{ "file_path": "candle/candle-book/src/SUMMARY.md", "repo_id": "candle", "token_count": 339 }
23
# Candle MNIST Tutorial ## Saving and Loading Models After training a model, it is useful to save and subsequently load the model parameters. In Candle, this functionality is managed through the `VarMap` data structure, with parameters stored on disk using the [safetensors](https://huggingface.co/docs/safetensors/index) format. ### Saving Model Parameters Let's modify our `training_loop` function to include functionality for saving weights: ```rust fn training_loop( m: candle_datasets::vision::Dataset, ) -> anyhow::Result<()> { let dev = Device::cuda_if_available(0)?; let train_labels = m.train_labels; let train_images = m.train_images.to_device(&dev)?; let train_labels = train_labels.to_dtype(DType::U32)?.to_device(&dev)?; // Initialize a VarMap for trainable parameters let varmap = VarMap::new(); let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev); let model = Model::new(vs.clone())?; let learning_rate = 0.05; let epochs = 10; // Initialize stochastic gradient descent optimizer let mut sgd = candle_nn::SGD::new(varmap.all_vars(), learning_rate)?; let test_images = m.test_images.to_device(&dev)?; let test_labels = m.test_labels.to_dtype(DType::U32)?.to_device(&dev)?; for epoch in 1..epochs { // Standard MNIST forward pass let logits = model.forward(&train_images)?; let log_sm = ops::log_softmax(&logits, D::Minus1)?; // Compute Negative Log Likelihood loss let loss = loss::nll(&log_sm, &train_labels)?; // Perform backward pass and update weights sgd.backward_step(&loss)?; // Evaluate model on test set let test_logits = model.forward(&test_images)?; let sum_ok = test_logits .argmax(D::Minus1)? .eq(&test_labels)? .to_dtype(DType::F32)? .sum_all()? .to_scalar::<f32>()?; let test_accuracy = sum_ok / test_labels.dims1()? as f32; println!( "{epoch:4} train loss: {:8.5} test acc: {:5.2}%", loss.to_scalar::<f32>()?, test_accuracy ); } // Save model weights to disk varmap.save("model_weights.safetensors")?; Ok(()) } ``` ```bash $ cargo run --release > 1 train loss: 2.40485 test acc: 0.11% > 2 train loss: 2.34161 test acc: 0.14% > 3 train loss: 2.28841 test acc: 0.17% > 4 train loss: 2.24158 test acc: 0.19% > 5 train loss: 2.19898 test acc: 0.23% > 6 train loss: 2.15927 test acc: 0.26% > 7 train loss: 2.12161 test acc: 0.29% > 8 train loss: 2.08549 test acc: 0.32% > 9 train loss: 2.05053 test acc: 0.35% ``` ### Loading Model Parameters Now that we have saved our model parameters, we can modify the code to load them. The primary change required is to make the `varmap` variable mutable: ```rust fn training_loop( m: candle_datasets::vision::Dataset, ) -> anyhow::Result<()> { let dev = Device::cuda_if_available(0)?; let train_labels = m.train_labels; let train_images = m.train_images.to_device(&dev)?; let train_labels = train_labels.to_dtype(DType::U32)?.to_device(&dev)?; // Create a mutable VarMap for trainable parameters let mut varmap = VarMap::new(); let vs = VarBuilder::from_varmap(&varmap, DType::F32, &dev); let model = Model::new(vs.clone())?; // Load pre-trained weights from file varmap.load("model_weights.safetensors")?; let learning_rate = 0.05; let epochs = 10; // Initialize stochastic gradient descent optimizer let mut sgd = candle_nn::SGD::new(varmap.all_vars(), learning_rate)?; let test_images = m.test_images.to_device(&dev)?; let test_labels = m.test_labels.to_dtype(DType::U32)?.to_device(&dev)?; for epoch in 1..epochs { // Standard MNIST forward pass let logits = model.forward(&train_images)?; let log_sm = ops::log_softmax(&logits, D::Minus1)?; // Compute Negative Log Likelihood loss let loss = loss::nll(&log_sm, &train_labels)?; // Perform backward pass and update weights sgd.backward_step(&loss)?; // Evaluate model on test set let test_logits = model.forward(&test_images)?; let sum_ok = test_logits .argmax(D::Minus1)? .eq(&test_labels)? .to_dtype(DType::F32)? .sum_all()? .to_scalar::<f32>()?; let test_accuracy = sum_ok / test_labels.dims1()? as f32; println!( "{epoch:4} train loss: {:8.5} test acc: {:5.2}%", loss.to_scalar::<f32>()?, test_accuracy ); } // Save updated weights back to disk varmap.save("model_weights.safetensors")?; Ok(()) } ``` ```bash $ cargo run --release > 1 train loss: 2.01645 test acc: 0.38% > 2 train loss: 1.98300 test acc: 0.41% > 3 train loss: 1.95008 test acc: 0.44% > 4 train loss: 1.91754 test acc: 0.47% > 5 train loss: 1.88534 test acc: 0.50% > 6 train loss: 1.85349 test acc: 0.53% > 7 train loss: 1.82198 test acc: 0.56% > 8 train loss: 1.79077 test acc: 0.59% > 9 train loss: 1.75989 test acc: 0.61% ``` Note that loading the weights will fail if the specified file does not exist or is incompatible with the current model architecture. Implementing file existence checks and appropriate error handling is left to the user.
candle/candle-book/src/guide/mnist/saving_loading.md/0
{ "file_path": "candle/candle-book/src/guide/mnist/saving_loading.md", "repo_id": "candle", "token_count": 2293 }
24
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { // This requires the code to be run with MTL_CAPTURE_ENABLED=1 let device = Device::new_metal(0)?; let metal_device = match &device { Device::Metal(m) => m, _ => anyhow::bail!("unexpected device"), }; metal_device.capture("/tmp/candle.gputrace")?; // This first synchronize ensures that a new command buffer gets created after setting up the // capture scope. device.synchronize()?; let x = Tensor::randn(0f32, 1.0, (128, 128), &device)?; let x1 = x.add(&x)?; println!("{x1:?}"); // This second synchronize ensures that the command buffer gets committed before the end of the // capture scope. device.synchronize()?; Ok(()) }
candle/candle-core/examples/metal_basics.rs/0
{ "file_path": "candle/candle-core/examples/metal_basics.rs", "repo_id": "candle", "token_count": 346 }
25
use crate::{DType, Layout}; /// cudarc related errors #[derive(thiserror::Error, Debug)] pub enum CudaError { #[error(transparent)] Cuda(#[from] cudarc::driver::DriverError), #[error(transparent)] Compiler(#[from] cudarc::nvrtc::CompileError), #[error(transparent)] Cublas(#[from] cudarc::cublas::result::CublasError), #[error(transparent)] Curand(#[from] cudarc::curand::result::CurandError), #[error("missing kernel '{module_name}'")] MissingKernel { module_name: String }, #[error("unsupported dtype {dtype:?} for {op}")] UnsupportedDtype { dtype: DType, op: &'static str }, #[error("internal error '{0}'")] InternalError(&'static str), #[error("matmul is only supported for contiguous tensors lstride: {lhs_stride:?} rstride: {rhs_stride:?} mnk: {mnk:?}")] MatMulNonContiguous { lhs_stride: Layout, rhs_stride: Layout, mnk: (usize, usize, usize), }, #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedDType { msg: &'static str, expected: DType, got: DType, }, #[error("{cuda} when loading {module_name}")] Load { cuda: cudarc::driver::DriverError, module_name: String, }, } impl From<CudaError> for crate::Error { fn from(val: CudaError) -> Self { crate::Error::Cuda(Box::new(val)).bt() } } pub trait WrapErr<O> { fn w(self) -> std::result::Result<O, crate::Error>; } impl<O, E: Into<CudaError>> WrapErr<O> for std::result::Result<O, E> { fn w(self) -> std::result::Result<O, crate::Error> { self.map_err(|e| crate::Error::Cuda(Box::new(e.into())).bt()) } }
candle/candle-core/src/cuda_backend/error.rs/0
{ "file_path": "candle/candle-core/src/cuda_backend/error.rs", "repo_id": "candle", "token_count": 750 }
26
//! Numpy support for tensors. //! //! The spec for the npy format can be found in //! [npy-format](https://docs.scipy.org/doc/numpy-1.14.2/neps/npy-format.html). //! The functions from this module can be used to read tensors from npy/npz files //! or write tensors to these files. A npy file contains a single tensor (unnamed) //! whereas a npz file can contain multiple named tensors. npz files are also compressed. //! //! These two formats are easy to use in Python using the numpy library. //! //! ```python //! import numpy as np //! x = np.arange(10) //! //! # Write a npy file. //! np.save("test.npy", x) //! //! # Read a value from the npy file. //! x = np.load("test.npy") //! //! # Write multiple values to a npz file. //! values = { "x": x, "x_plus_one": x + 1 } //! np.savez("test.npz", **values) //! //! # Load multiple values from a npz file. //! values = np.loadz("test.npz") //! ``` use crate::{DType, Device, Error, Result, Shape, Tensor}; use byteorder::{LittleEndian, ReadBytesExt}; use float8::F8E4M3; use half::{bf16, f16, slice::HalfFloatSliceExt}; use std::collections::HashMap; use std::fs::File; use std::io::{BufReader, Read, Write}; use std::path::Path; use std::slice; const NPY_MAGIC_STRING: &[u8] = b"\x93NUMPY"; const NPY_SUFFIX: &str = ".npy"; fn read_header<R: Read>(reader: &mut R) -> Result<String> { let mut magic_string = vec![0u8; NPY_MAGIC_STRING.len()]; reader.read_exact(&mut magic_string)?; if magic_string != NPY_MAGIC_STRING { return Err(Error::Npy("magic string mismatch".to_string())); } let mut version = [0u8; 2]; reader.read_exact(&mut version)?; let header_len_len = match version[0] { 1 => 2, 2 => 4, otherwise => return Err(Error::Npy(format!("unsupported version {otherwise}"))), }; let mut header_len = vec![0u8; header_len_len]; reader.read_exact(&mut header_len)?; let header_len = header_len .iter() .rev() .fold(0_usize, |acc, &v| 256 * acc + v as usize); let mut header = vec![0u8; header_len]; reader.read_exact(&mut header)?; Ok(String::from_utf8_lossy(&header).to_string()) } #[derive(Debug, PartialEq)] struct Header { descr: DType, fortran_order: bool, shape: Vec<usize>, } impl Header { fn shape(&self) -> Shape { Shape::from(self.shape.as_slice()) } fn to_string(&self) -> Result<String> { let fortran_order = if self.fortran_order { "True" } else { "False" }; let mut shape = self .shape .iter() .map(|x| x.to_string()) .collect::<Vec<_>>() .join(","); let descr = match self.descr { DType::BF16 => Err(Error::Npy("bf16 is not supported".into()))?, DType::F16 => "f2", DType::F32 => "f4", DType::F64 => "f8", DType::I64 => "i8", DType::U32 => "u4", DType::U8 => "u1", DType::F8E4M3 => Err(Error::Npy("f8e4m3 is not supported".into()))?, }; if !shape.is_empty() { shape.push(',') } Ok(format!( "{{'descr': '<{descr}', 'fortran_order': {fortran_order}, 'shape': ({shape}), }}" )) } // Hacky parser for the npy header, a typical example would be: // {'descr': '<f8', 'fortran_order': False, 'shape': (128,), } fn parse(header: &str) -> Result<Header> { let header = header.trim_matches(|c: char| c == '{' || c == '}' || c == ',' || c.is_whitespace()); let mut parts: Vec<String> = vec![]; let mut start_index = 0usize; let mut cnt_parenthesis = 0i64; for (index, c) in header.char_indices() { match c { '(' => cnt_parenthesis += 1, ')' => cnt_parenthesis -= 1, ',' => { if cnt_parenthesis == 0 { parts.push(header[start_index..index].to_owned()); start_index = index + 1; } } _ => {} } } parts.push(header[start_index..].to_owned()); let mut part_map: HashMap<String, String> = HashMap::new(); for part in parts.iter() { let part = part.trim(); if !part.is_empty() { match part.split(':').collect::<Vec<_>>().as_slice() { [key, value] => { let key = key.trim_matches(|c: char| c == '\'' || c.is_whitespace()); let value = value.trim_matches(|c: char| c == '\'' || c.is_whitespace()); let _ = part_map.insert(key.to_owned(), value.to_owned()); } _ => return Err(Error::Npy(format!("unable to parse header {header}"))), } } } let fortran_order = match part_map.get("fortran_order") { None => false, Some(fortran_order) => match fortran_order.as_ref() { "False" => false, "True" => true, _ => return Err(Error::Npy(format!("unknown fortran_order {fortran_order}"))), }, }; let descr = match part_map.get("descr") { None => return Err(Error::Npy("no descr in header".to_string())), Some(descr) => { if descr.is_empty() { return Err(Error::Npy("empty descr".to_string())); } if descr.starts_with('>') { return Err(Error::Npy(format!("little-endian descr {descr}"))); } // the only supported types in tensor are: // float64, float32, float16, // complex64, complex128, // int64, int32, int16, int8, // uint8, and bool. match descr.trim_matches(|c: char| c == '=' || c == '<' || c == '|') { "e" | "f2" => DType::F16, "f" | "f4" => DType::F32, "d" | "f8" => DType::F64, // "i" | "i4" => DType::S32, "q" | "i8" => DType::I64, // "h" | "i2" => DType::S16, // "b" | "i1" => DType::S8, "B" | "u1" => DType::U8, "I" | "u4" => DType::U32, "?" | "b1" => DType::U8, // "F" | "F4" => DType::C64, // "D" | "F8" => DType::C128, descr => return Err(Error::Npy(format!("unrecognized descr {descr}"))), } } }; let shape = match part_map.get("shape") { None => return Err(Error::Npy("no shape in header".to_string())), Some(shape) => { let shape = shape.trim_matches(|c: char| c == '(' || c == ')' || c == ','); if shape.is_empty() { vec![] } else { shape .split(',') .map(|v| v.trim().parse::<usize>()) .collect::<std::result::Result<Vec<_>, _>>()? } } }; Ok(Header { descr, fortran_order, shape, }) } } impl Tensor { // TODO: Add the possibility to read directly to a device? pub(crate) fn from_reader<R: std::io::Read>( shape: Shape, dtype: DType, reader: &mut R, ) -> Result<Self> { let elem_count = shape.elem_count(); match dtype { DType::BF16 => { let mut data_t = vec![bf16::ZERO; elem_count]; reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F16 => { let mut data_t = vec![f16::ZERO; elem_count]; reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F32 => { let mut data_t = vec![0f32; elem_count]; reader.read_f32_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F64 => { let mut data_t = vec![0f64; elem_count]; reader.read_f64_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::U8 => { let mut data_t = vec![0u8; elem_count]; reader.read_exact(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::U32 => { let mut data_t = vec![0u32; elem_count]; reader.read_u32_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::I64 => { let mut data_t = vec![0i64; elem_count]; reader.read_i64_into::<LittleEndian>(&mut data_t)?; Tensor::from_vec(data_t, shape, &Device::Cpu) } DType::F8E4M3 => { let mut data_t = vec![F8E4M3::ZERO; elem_count]; let ptr = data_t.as_mut_ptr().cast::<i8>(); let len = data_t.len(); reader.read_i8_into(unsafe { slice::from_raw_parts_mut(ptr, len) })?; Tensor::from_vec(data_t, shape, &Device::Cpu) } } } /// Reads a npy file and return the stored multi-dimensional array as a tensor. pub fn read_npy<T: AsRef<Path>>(path: T) -> Result<Self> { let mut reader = File::open(path.as_ref())?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } Self::from_reader(header.shape(), header.descr, &mut reader) } /// Reads a npz file and returns the stored multi-dimensional arrays together with their names. pub fn read_npz<T: AsRef<Path>>(path: T) -> Result<Vec<(String, Self)>> { let zip_reader = BufReader::new(File::open(path.as_ref())?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut result = vec![]; for i in 0..zip.len() { let mut reader = zip.by_index(i)?; let name = { let name = reader.name(); name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned() }; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let s = Self::from_reader(header.shape(), header.descr, &mut reader)?; result.push((name, s)) } Ok(result) } /// Reads a npz file and returns the stored multi-dimensional arrays for some specified names. pub fn read_npz_by_name<T: AsRef<Path>>(path: T, names: &[&str]) -> Result<Vec<Self>> { let zip_reader = BufReader::new(File::open(path.as_ref())?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut result = vec![]; for name in names.iter() { let mut reader = match zip.by_name(&format!("{name}{NPY_SUFFIX}")) { Ok(reader) => reader, Err(_) => Err(Error::Npy(format!( "no array for {name} in {:?}", path.as_ref() )))?, }; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let s = Self::from_reader(header.shape(), header.descr, &mut reader)?; result.push(s) } Ok(result) } fn write<T: Write>(&self, f: &mut T) -> Result<()> { f.write_all(NPY_MAGIC_STRING)?; f.write_all(&[1u8, 0u8])?; let header = Header { descr: self.dtype(), fortran_order: false, shape: self.dims().to_vec(), }; let mut header = header.to_string()?; let pad = 16 - (NPY_MAGIC_STRING.len() + 5 + header.len()) % 16; for _ in 0..pad % 16 { header.push(' ') } header.push('\n'); f.write_all(&[(header.len() % 256) as u8, (header.len() / 256) as u8])?; f.write_all(header.as_bytes())?; self.write_bytes(f) } /// Writes a multi-dimensional array in the npy format. pub fn write_npy<T: AsRef<Path>>(&self, path: T) -> Result<()> { let mut f = File::create(path.as_ref())?; self.write(&mut f) } /// Writes multiple multi-dimensional arrays using the npz format. pub fn write_npz<S: AsRef<str>, T: AsRef<Tensor>, P: AsRef<Path>>( ts: &[(S, T)], path: P, ) -> Result<()> { let mut zip = zip::ZipWriter::new(File::create(path.as_ref())?); let options: zip::write::FileOptions<()> = zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored); for (name, tensor) in ts.iter() { zip.start_file(format!("{}.npy", name.as_ref()), options)?; tensor.as_ref().write(&mut zip)? } Ok(()) } } /// Lazy tensor loader. pub struct NpzTensors { index_per_name: HashMap<String, usize>, path: std::path::PathBuf, // We do not store a zip reader as it needs mutable access to extract data. Instead we // re-create a zip reader for each tensor. } impl NpzTensors { pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> { let path = path.as_ref().to_owned(); let zip_reader = BufReader::new(File::open(&path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut index_per_name = HashMap::new(); for i in 0..zip.len() { let file = zip.by_index(i)?; let name = { let name = file.name(); name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned() }; index_per_name.insert(name, i); } Ok(Self { index_per_name, path, }) } pub fn names(&self) -> Vec<&String> { self.index_per_name.keys().collect() } /// This only returns the shape and dtype for a named tensor. Compared to `get`, this avoids /// reading the whole tensor data. pub fn get_shape_and_dtype(&self, name: &str) -> Result<(Shape, DType)> { let index = match self.index_per_name.get(name) { None => crate::bail!("cannot find tensor {name}"), Some(index) => *index, }; let zip_reader = BufReader::new(File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_index(index)?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; Ok((header.shape(), header.descr)) } pub fn get(&self, name: &str) -> Result<Option<Tensor>> { let index = match self.index_per_name.get(name) { None => return Ok(None), Some(index) => *index, }; // We hope that the file has not changed since first reading it. let zip_reader = BufReader::new(File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_index(index)?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let tensor = Tensor::from_reader(header.shape(), header.descr, &mut reader)?; Ok(Some(tensor)) } } #[cfg(test)] mod tests { use super::Header; #[test] fn parse() { let h = "{'descr': '<f8', 'fortran_order': False, 'shape': (128,), }"; assert_eq!( Header::parse(h).unwrap(), Header { descr: crate::DType::F64, fortran_order: false, shape: vec![128] } ); let h = "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128), }"; let h = Header::parse(h).unwrap(); assert_eq!( h, Header { descr: crate::DType::F32, fortran_order: true, shape: vec![256, 1, 128] } ); assert_eq!( h.to_string().unwrap(), "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128,), }" ); let h = Header { descr: crate::DType::U32, fortran_order: false, shape: vec![], }; assert_eq!( h.to_string().unwrap(), "{'descr': '<u4', 'fortran_order': False, 'shape': (), }" ); } }
candle/candle-core/src/npy.rs/0
{ "file_path": "candle/candle-core/src/npy.rs", "repo_id": "candle", "token_count": 9003 }
27
//! TensorScalar Enum and Trait //! use crate::{DType, Result, Tensor, WithDType}; use float8::F8E4M3; use half::{bf16, f16}; #[derive(Debug, Clone, Copy, PartialEq)] pub enum Scalar { U8(u8), U32(u32), I64(i64), BF16(bf16), F16(f16), F32(f32), F64(f64), F8E4M3(F8E4M3), } impl<T: WithDType> From<T> for Scalar { fn from(value: T) -> Self { value.to_scalar() } } impl Scalar { pub fn zero(dtype: DType) -> Self { match dtype { DType::U8 => Scalar::U8(0), DType::U32 => Scalar::U32(0), DType::I64 => Scalar::I64(0), DType::BF16 => Scalar::BF16(bf16::ZERO), DType::F16 => Scalar::F16(f16::ZERO), DType::F32 => Scalar::F32(0.0), DType::F64 => Scalar::F64(0.0), DType::F8E4M3 => Scalar::F8E4M3(F8E4M3::ZERO), } } pub fn one(dtype: DType) -> Self { match dtype { DType::U8 => Scalar::U8(1), DType::U32 => Scalar::U32(1), DType::I64 => Scalar::I64(1), DType::BF16 => Scalar::BF16(bf16::ONE), DType::F16 => Scalar::F16(f16::ONE), DType::F32 => Scalar::F32(1.0), DType::F64 => Scalar::F64(1.0), DType::F8E4M3 => Scalar::F8E4M3(F8E4M3::ONE), } } pub fn dtype(&self) -> DType { match self { Scalar::U8(_) => DType::U8, Scalar::U32(_) => DType::U32, Scalar::I64(_) => DType::I64, Scalar::BF16(_) => DType::BF16, Scalar::F16(_) => DType::F16, Scalar::F32(_) => DType::F32, Scalar::F64(_) => DType::F64, Scalar::F8E4M3(_) => DType::F8E4M3, } } pub fn to_f64(&self) -> f64 { match self { Scalar::U8(v) => *v as f64, Scalar::U32(v) => *v as f64, Scalar::I64(v) => *v as f64, Scalar::BF16(v) => v.to_f64(), Scalar::F16(v) => v.to_f64(), Scalar::F32(v) => *v as f64, Scalar::F64(v) => *v, Scalar::F8E4M3(v) => v.to_f64(), } } } pub enum TensorScalar { Tensor(Tensor), Scalar(Tensor), } pub trait TensorOrScalar { fn to_tensor_scalar(self) -> Result<TensorScalar>; } impl TensorOrScalar for &Tensor { fn to_tensor_scalar(self) -> Result<TensorScalar> { Ok(TensorScalar::Tensor(self.clone())) } } impl<T: WithDType> TensorOrScalar for T { fn to_tensor_scalar(self) -> Result<TensorScalar> { let scalar = Tensor::new(self, &crate::Device::Cpu)?; Ok(TensorScalar::Scalar(scalar)) } }
candle/candle-core/src/scalar.rs/0
{ "file_path": "candle/candle-core/src/scalar.rs", "repo_id": "candle", "token_count": 1552 }
28
use anyhow::Result; use candle_core::{Device, IndexOp, Tensor}; #[test] fn integer_index() -> Result<()> { let dev = Device::Cpu; let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?; let result = tensor.i(1)?; assert_eq!(result.dims(), &[3]); assert_eq!(result.to_vec1::<u32>()?, &[3, 4, 5]); let result = tensor.i((.., 2))?; assert_eq!(result.dims(), &[2]); assert_eq!(result.to_vec1::<u32>()?, &[2, 5]); Ok(()) } #[test] fn range_index() -> Result<()> { let dev = Device::Cpu; // RangeFull let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?; let result = tensor.i(..)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // Range let tensor = Tensor::arange(0u32, 4 * 3, &dev)?.reshape((4, 3))?; let result = tensor.i(1..3)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]); // RangeFrom let result = tensor.i(2..)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[6, 7, 8], [9, 10, 11]]); // RangeTo let result = tensor.i(..2)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // RangeInclusive let result = tensor.i(1..=2)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]); // RangeTo let result = tensor.i(..1)?; assert_eq!(result.dims(), &[1, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2]]); // RangeToInclusive let result = tensor.i(..=1)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // Empty range let result = tensor.i(1..1)?; assert_eq!(result.dims(), &[0, 3]); let empty: [[u32; 3]; 0] = []; assert_eq!(result.to_vec2::<u32>()?, &empty); // Similar to PyTorch, allow empty ranges when the computed length is negative. #[allow(clippy::reversed_empty_ranges)] let result = tensor.i(1..0)?; assert_eq!(result.dims(), &[0, 3]); let empty: [[u32; 3]; 0] = []; assert_eq!(result.to_vec2::<u32>()?, &empty); Ok(()) } #[test] fn index_3d() -> Result<()> { let tensor = Tensor::from_iter(0..24u32, &Device::Cpu)?.reshape((2, 3, 4))?; assert_eq!(tensor.i((0, 0, 0))?.to_scalar::<u32>()?, 0); assert_eq!(tensor.i((1, 0, 0))?.to_scalar::<u32>()?, 12); assert_eq!(tensor.i((0, 1, 0))?.to_scalar::<u32>()?, 4); assert_eq!(tensor.i((0, 1, 3))?.to_scalar::<u32>()?, 7); assert_eq!(tensor.i((0..2, 0, 0))?.to_vec1::<u32>()?, &[0, 12]); assert_eq!( tensor.i((0..2, .., 0))?.to_vec2::<u32>()?, &[[0, 4, 8], [12, 16, 20]] ); assert_eq!( tensor.i((..2, .., 3))?.to_vec2::<u32>()?, &[[3, 7, 11], [15, 19, 23]] ); assert_eq!(tensor.i((1, .., 3))?.to_vec1::<u32>()?, &[15, 19, 23]); Ok(()) } #[test] fn slice_assign() -> Result<()> { let dev = Device::Cpu; let tensor = Tensor::arange(0u32, 4 * 5, &dev)?.reshape((4, 5))?; let src = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((3, 2))?; let out = tensor.slice_assign(&[1..4, 3..5], &src)?; assert_eq!( out.to_vec2::<u32>()?, &[ [0, 1, 2, 3, 4], [5, 6, 7, 0, 1], [10, 11, 12, 2, 3], [15, 16, 17, 4, 5] ] ); let out = tensor.slice_assign(&[0..3, 0..2], &src)?; assert_eq!( out.to_vec2::<u32>()?, &[ [0, 1, 2, 3, 4], [2, 3, 7, 8, 9], [4, 5, 12, 13, 14], [15, 16, 17, 18, 19] ] ); Ok(()) }
candle/candle-core/tests/indexing_tests.rs/0
{ "file_path": "candle/candle-core/tests/indexing_tests.rs", "repo_id": "candle", "token_count": 1994 }
29
use candle::{Result, Tensor}; pub struct Batcher<I> { inner: I, batch_size: usize, return_last_incomplete_batch: bool, } impl<I> Batcher<I> { fn new(inner: I) -> Self { Self { inner, batch_size: 16, return_last_incomplete_batch: false, } } pub fn batch_size(mut self, batch_size: usize) -> Self { self.batch_size = batch_size; self } pub fn return_last_incomplete_batch(mut self, r: bool) -> Self { self.return_last_incomplete_batch = r; self } } pub struct Iter1<I: Iterator<Item = Tensor>> { inner: I, } pub struct Iter2<I: Iterator<Item = (Tensor, Tensor)>> { inner: I, } impl<I: Iterator<Item = Tensor>> Batcher<Iter1<I>> { pub fn new1(inner: I) -> Self { Self::new(Iter1 { inner }) } } impl<I: Iterator<Item = (Tensor, Tensor)>> Batcher<Iter2<I>> { pub fn new2(inner: I) -> Self { Self::new(Iter2 { inner }) } } pub struct IterResult1<I: Iterator<Item = Result<Tensor>>> { inner: I, } pub struct IterResult2<I: Iterator<Item = Result<(Tensor, Tensor)>>> { inner: I, } impl<I: Iterator<Item = Result<Tensor>>> Batcher<IterResult1<I>> { pub fn new_r1(inner: I) -> Self { Self::new(IterResult1 { inner }) } } impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Batcher<IterResult2<I>> { pub fn new_r2(inner: I) -> Self { Self::new(IterResult2 { inner }) } } impl<I: Iterator<Item = Tensor>> Iterator for Batcher<Iter1<I>> { type Item = Result<Tensor>; fn next(&mut self) -> Option<Self::Item> { let mut items = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { // We have two levels of inner here so that we can have two implementations of the // Iterator trait that are different for Iter1 and Iter2. If rust gets better // specialization at some point we can get rid of this. match self.inner.inner.next() { Some(item) => items.push(item), None => { if self.return_last_incomplete_batch && !items.is_empty() { break; } return None; } } } Some(Tensor::stack(&items, 0)) } } impl<I: Iterator<Item = (Tensor, Tensor)>> Iterator for Batcher<Iter2<I>> { type Item = Result<(Tensor, Tensor)>; fn next(&mut self) -> Option<Self::Item> { let mut xs = Vec::with_capacity(self.batch_size); let mut ys = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { match self.inner.inner.next() { Some((x, y)) => { xs.push(x); ys.push(y) } None => { if self.return_last_incomplete_batch && !xs.is_empty() && !ys.is_empty() { break; } return None; } } } let xs = Tensor::stack(&xs, 0); let ys = Tensor::stack(&ys, 0); Some(xs.and_then(|xs| ys.map(|ys| (xs, ys)))) } } impl<I: Iterator<Item = Result<Tensor>>> Iterator for Batcher<IterResult1<I>> { type Item = Result<Tensor>; fn next(&mut self) -> Option<Self::Item> { let mut items = Vec::with_capacity(self.batch_size); for _i in 0..self.batch_size { // We have two levels of inner here so that we can have two implementations of the // Iterator trait that are different for Iter1 and Iter2. If rust gets better // specialization at some point we can get rid of this. match self.inner.inner.next() { Some(item) => items.push(item), None => { if self.return_last_incomplete_batch && !items.is_empty() { break; } return None; } } } let items = items.into_iter().collect::<Result<Vec<Tensor>>>(); Some(items.and_then(|items| Tensor::stack(&items, 0))) } } impl<I: Iterator<Item = Result<(Tensor, Tensor)>>> Iterator for Batcher<IterResult2<I>> { type Item = Result<(Tensor, Tensor)>; fn next(&mut self) -> Option<Self::Item> { let mut xs = Vec::with_capacity(self.batch_size); let mut ys = Vec::with_capacity(self.batch_size); let mut errs = vec![]; for _i in 0..self.batch_size { match self.inner.inner.next() { Some(Ok((x, y))) => { xs.push(x); ys.push(y) } Some(Err(err)) => errs.push(err), None => { if self.return_last_incomplete_batch && !xs.is_empty() && !ys.is_empty() { break; } return None; } } } if !errs.is_empty() { return Some(Err(errs.swap_remove(0))); } let xs = Tensor::stack(&xs, 0); let ys = Tensor::stack(&ys, 0); Some(xs.and_then(|xs| ys.map(|ys| (xs, ys)))) } }
candle/candle-datasets/src/batcher.rs/0
{ "file_path": "candle/candle-datasets/src/batcher.rs", "repo_id": "candle", "token_count": 2708 }
30
# candle-bert Bert is a general large language model. In this example it can be used for two different tasks: - Compute sentence embeddings for a prompt. - Compute similarities between a set of sentences. ## Sentence embeddings Bert is used to compute the sentence embeddings for a prompt. The model weights are downloaded from the hub on the first run. ```bash cargo run --example bert --release -- --prompt "Here is a test sentence" > [[[ 0.0798, -0.0665, -0.0247, ..., -0.1082, -0.1000, -0.2751], > [ 0.4218, 0.2690, 0.2740, ..., 0.3889, 1.3503, 0.9908], > [ 0.0466, 0.3041, -0.1143, ..., 0.4427, 0.6926, -0.1515], > ... > [ 0.3396, 0.4320, -0.4408, ..., 0.9212, 0.2331, -0.6777], > [ 0.2789, 0.7539, 0.4306, ..., -0.0095, 0.3375, -1.7529], > [ 0.6737, 0.7882, 0.0548, ..., 0.1836, 0.7299, -0.6617]]] > Tensor[[1, 7, 384], f32] ``` ### Custom models You can specify different models, such as BGE, with the `--model-id` flag: ```bash cargo run --example bert --release -- \ --model-id BAAI/bge-large-zh-v1.5 \ --prompt "Here is a test sentence" Loaded and encoded 435.70775ms [[[ 3.0944e-1, -7.8455e-5, -1.2768e0, ..., 1.3755e-2, -3.2371e-1, 2.3819e-1], [-2.8506e-1, 1.9953e-1, -1.3076e0, ..., 6.9819e-2, 1.0833e-2, -1.1512e0], [ 3.9892e-1, 2.0000e-1, -9.3178e-1, ..., -4.1393e-1, -4.9644e-2, -3.3786e-1], ... [ 6.0345e-1, 3.5744e-1, -1.2672e0, ..., -6.9165e-1, -3.4973e-3, -8.4214e-1], [ 3.9218e-1, -3.2735e-1, -1.3123e0, ..., -4.9318e-1, -5.1334e-1, -3.6391e-1], [ 3.0978e-1, 2.5662e-4, -1.2773e0, ..., 1.3357e-2, -3.2390e-1, 2.3858e-1]]] Tensor[[1, 9, 1024], f32] Took 176.744667ms ``` ### Gelu approximation You can get a speedup by using an approximation of the gelu activation, with a small loss of precision, by passing the `--approximate-gelu` flag: ```bash $ cargo run --example bert --release -- \ --model-id BAAI/bge-large-zh-v1.5 \ --prompt "Here is a test sentence" \ --approximate-gelu Loaded and encoded 244.388042ms [[[ 3.1048e-1, -6.0339e-4, -1.2758e0, ..., 1.3718e-2, -3.2362e-1, 2.3775e-1], [-2.8354e-1, 1.9984e-1, -1.3077e0, ..., 6.9390e-2, 9.9681e-3, -1.1531e0], [ 3.9947e-1, 1.9917e-1, -9.3178e-1, ..., -4.1301e-1, -5.0719e-2, -3.3955e-1], ... [ 6.0499e-1, 3.5664e-1, -1.2642e0, ..., -6.9134e-1, -3.4581e-3, -8.4471e-1], [ 3.9311e-1, -3.2812e-1, -1.3105e0, ..., -4.9291e-1, -5.1270e-1, -3.6543e-1], [ 3.1082e-1, -2.6737e-4, -1.2762e0, ..., 1.3319e-2, -3.2381e-1, 2.3815e-1]]] Tensor[[1, 9, 1024], f32] Took 116.840791ms ``` ## Similarities In this example, Bert is used to compute the sentence embeddings for a set of sentences (hardcoded in the examples). Then cosine similarities are computed for each sentence pair and they are reported by decreasing values, hence the first reported pair contains the two sentences that have the highest similarity score. The sentence embeddings are computed using average pooling through all the sentence tokens, including some potential padding. ```bash cargo run --example bert --release > score: 0.85 'The new movie is awesome' 'The new movie is so great' > score: 0.61 'The cat sits outside' 'The cat plays in the garden' > score: 0.52 'I love pasta' 'Do you like pizza?' > score: 0.23 'The new movie is awesome' 'Do you like pizza?' > score: 0.22 'I love pasta' 'The new movie is awesome' ```
candle/candle-examples/examples/bert/README.md/0
{ "file_path": "candle/candle-examples/examples/bert/README.md", "repo_id": "candle", "token_count": 1564 }
31
# candle-convmixer A lightweight CNN architecture that processes image patches similar to a vision transformer, with separate spatial and channel convolutions. ConvMixer from [Patches Are All You Need?](https://arxiv.org/pdf/2201.09792) and [ConvMixer](https://github.com/locuslab/convmixer). ## Running an example ```bash $ cargo run --example convmixer --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg > mountain bike, all-terrain bike, off-roader: 61.75% > unicycle, monocycle : 5.73% > moped : 3.66% > bicycle-built-for-two, tandem bicycle, tandem: 3.51% > crash helmet : 0.85% ```
candle/candle-examples/examples/convmixer/README.md/0
{ "file_path": "candle/candle-examples/examples/convmixer/README.md", "repo_id": "candle", "token_count": 238 }
32
# candle-dinov2 [Depth Anything V2] is a model for Monocular Depth Estimation (MDE, i.e. just using a single image) which builds on the [DINOv2](https://github.com/facebookresearch/dinov2) vision transformer. This example first instantiates the DINOv2 model and then proceeds to create DepthAnythingV2 and run it. ## Running an example with color map and CUDA ```bash cargo run --features cuda,depth_anything_v2 --package candle-examples --example depth_anything_v2 -- --color-map --image candle-examples/examples/yolo-v8/assets/bike.jpg ```
candle/candle-examples/examples/depth_anything_v2/README.md/0
{ "file_path": "candle/candle-examples/examples/depth_anything_v2/README.md", "repo_id": "candle", "token_count": 168 }
33
pub enum SeparatorStyle { Two, Mpt, } pub struct Conversation { pub system: String, pub roles: Vec<String>, pub messages: Vec<(String, Option<String>)>, pub offset: i32, pub sep_style: SeparatorStyle, pub sep: String, pub sep2: Option<String>, pub version: String, } impl Conversation { pub fn new( system: &str, roles: &[String], offset: i32, sep_style: SeparatorStyle, sep: &str, sep2: Option<&str>, version: &str, ) -> Self { Conversation { system: system.to_string(), roles: roles.to_vec(), messages: Vec::new(), offset, sep_style, sep: sep.to_string(), sep2: sep2.map(|s| s.to_string()), version: version.to_string(), } } pub fn conv_chatml_direct() -> Self { Conversation::new( "<|im_start|>system\nAnswer the questions.", &[ "<|im_start|>user\n".to_string(), "<|im_start|>assistant\n".to_string(), ], 0, SeparatorStyle::Mpt, "<|im_end|>", None, "mpt", ) } pub fn conv_llava_v1() -> Self { Conversation::new( "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.", &[ "USER".to_string(), "ASSISTANT".to_string(), ], 0, SeparatorStyle::Two, " ", Some("</s>"), "v1" ) } pub fn append_message(&mut self, role: String, message: Option<&str>) { self.messages.push((role, message.map(|s| s.to_string()))) } pub fn append_user_message(&mut self, message: Option<&str>) { self.append_message(self.roles[0].clone(), message); } pub fn append_assistant_message(&mut self, message: Option<&str>) { self.append_message(self.roles[1].clone(), message); } pub fn get_prompt(&self) -> String { match self.sep_style { SeparatorStyle::Mpt => { let mut ret = String::new(); ret.push_str(&self.system); ret.push_str(&self.sep); for (role, message) in &self.messages { ret.push_str(role); if let Some(message) = message { ret.push_str(message); }; ret.push_str(&self.sep); } ret } SeparatorStyle::Two => { let seps = [self.sep.clone(), self.sep2.clone().unwrap()]; let mut ret = String::new(); ret.push_str(&self.system); ret.push_str(&seps[0]); for (i, (role, message)) in self.messages.iter().enumerate() { ret.push_str(role); if let Some(message) = message { ret.push_str(": "); // strictly follow the python implementation, otherwise it will cause some minor difference between tokens ^_^ ret.push_str(message); ret.push_str(&seps[i % 2]); } else { ret.push(':') } } ret } } } }
candle/candle-examples/examples/llava/conversation.rs/0
{ "file_path": "candle/candle-examples/examples/llava/conversation.rs", "repo_id": "candle", "token_count": 1910 }
34
# candle-moondream [Moondream](https://github.com/vikhyat/moondream) is a computer-vision model can answer real-world questions about images. It's tiny by today's models, with only 1.6B parameters. That enables it to run on a variety of devices, including mobile phones and edge devices. ## Running some examples First download an example image ```bash $ wget https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg ``` <img src="https://raw.githubusercontent.com/vikhyat/moondream/main/assets/demo-1.jpg" width="200"> Now you can run Moondream from the `candle-examples` crate: ```bash $ cargo run --example moondream --release -- --prompt "Describe the people behind the bikers?" --image "candle-examples/examples/yolo-v8/assets/bike.jpg" avavx: false, neon: true, simd128: false, f16c: false temp: 0.00 repeat-penalty: 1.00 repeat-last-n: 64 retrieved the files in 3.395583ms Running on CPU, to run on GPU(metal), build this example with `--features metal` loaded the model in 5.485493792s loaded and encoded the image Tensor[dims 3, 378, 378; f32] in 4.801396417s starting the inference loop The girl is eating a hamburger.< 9 tokens generated (0.68 token/s) ```
candle/candle-examples/examples/moondream/README.md/0
{ "file_path": "candle/candle-examples/examples/moondream/README.md", "repo_id": "candle", "token_count": 383 }
35
# PaliGemma [HuggingFace Model Card](https://huggingface.co/google/paligemma-3b-pt-224) - [Model Page](https://ai.google.dev/gemma/docs/paligemma) ```bash cargo run --features cuda --release --example paligemma -- \ --prompt "caption fr" --image candle-examples/examples/yolo-v8/assets/bike.jpg ``` ``` loaded image with shape Tensor[dims 1, 3, 224, 224; bf16, cuda:0] loaded the model in 1.267744448s caption fr. Un groupe de cyclistes qui sont dans la rue. 13 tokens generated (56.52 token/s) ``` ```bash cargo run --features cuda --release --example paligemma -- \ --prompt "caption fr" --image candle-examples/examples/flux/assets/flux-robot.jpg ``` ``` loaded image with shape Tensor[dims 1, 3, 224, 224; bf16, cuda:0] loaded the model in 1.271492621s caption fr une image d' un robot sur la plage avec le mot rouillé 15 tokens generated (62.78 token/s) ```
candle/candle-examples/examples/paligemma/README.md/0
{ "file_path": "candle/candle-examples/examples/paligemma/README.md", "repo_id": "candle", "token_count": 339 }
36
use super::gym_env::{GymEnv, Step}; use candle::{DType, Device, Error, Module, Result, Tensor}; use candle_nn::{ linear, ops::log_softmax, ops::softmax, sequential::seq, Activation, AdamW, Optimizer, ParamsAdamW, VarBuilder, VarMap, }; use rand::{distr::Distribution, rngs::ThreadRng, Rng}; fn new_model( input_shape: &[usize], num_actions: usize, dtype: DType, device: &Device, ) -> Result<(impl Module, VarMap)> { let input_size = input_shape.iter().product(); let varmap = VarMap::new(); let var_builder = VarBuilder::from_varmap(&varmap, dtype, device); let model = seq() .add(linear(input_size, 32, var_builder.pp("lin1"))?) .add(Activation::Relu) .add(linear(32, num_actions, var_builder.pp("lin2"))?); Ok((model, varmap)) } fn accumulate_rewards(steps: &[Step<i64>]) -> Vec<f64> { let mut rewards: Vec<f64> = steps.iter().map(|s| s.reward).collect(); let mut acc_reward = 0f64; for (i, reward) in rewards.iter_mut().enumerate().rev() { if steps[i].terminated { acc_reward = 0.0; } acc_reward += *reward; *reward = acc_reward; } rewards } fn weighted_sample(probs: Vec<f32>, rng: &mut ThreadRng) -> Result<usize> { let distribution = rand::distr::weighted::WeightedIndex::new(probs).map_err(Error::wrap)?; let mut rng = rng; Ok(distribution.sample(&mut rng)) } pub fn run() -> Result<()> { let env = GymEnv::new("CartPole-v1")?; println!("action space: {:?}", env.action_space()); println!("observation space: {:?}", env.observation_space()); let (model, varmap) = new_model( env.observation_space(), env.action_space(), DType::F32, &Device::Cpu, )?; let optimizer_params = ParamsAdamW { lr: 0.01, weight_decay: 0.01, ..Default::default() }; let mut optimizer = AdamW::new(varmap.all_vars(), optimizer_params)?; let mut rng = rand::rng(); for epoch_idx in 0..100 { let mut state = env.reset(rng.random::<u64>())?; let mut steps: Vec<Step<i64>> = vec![]; loop { let action = { let action_probs: Vec<f32> = softmax(&model.forward(&state.detach().unsqueeze(0)?)?, 1)? .squeeze(0)? .to_vec1()?; weighted_sample(action_probs, &mut rng)? as i64 }; let step = env.step(action)?; steps.push(step.copy_with_obs(&state)); if step.terminated || step.truncated { state = env.reset(rng.random::<u64>())?; if steps.len() > 5000 { break; } } else { state = step.state; } } let total_reward: f64 = steps.iter().map(|s| s.reward).sum(); let episodes: i64 = steps .iter() .map(|s| (s.terminated || s.truncated) as i64) .sum(); println!( "epoch: {:<3} episodes: {:<5} avg reward per episode: {:.2}", epoch_idx, episodes, total_reward / episodes as f64 ); let batch_size = steps.len(); let rewards = Tensor::from_vec(accumulate_rewards(&steps), batch_size, &Device::Cpu)? .to_dtype(DType::F32)? .detach(); let actions_mask = { let actions: Vec<i64> = steps.iter().map(|s| s.action).collect(); let actions_mask: Vec<Tensor> = actions .iter() .map(|&action| { // One-hot encoding let mut action_mask = vec![0.0; env.action_space()]; action_mask[action as usize] = 1.0; Tensor::from_vec(action_mask, env.action_space(), &Device::Cpu) .unwrap() .to_dtype(DType::F32) .unwrap() }) .collect(); Tensor::stack(&actions_mask, 0)?.detach() }; let states = { let states: Vec<Tensor> = steps.into_iter().map(|s| s.state).collect(); Tensor::stack(&states, 0)?.detach() }; let log_probs = actions_mask .mul(&log_softmax(&model.forward(&states)?, 1)?)? .sum(1)?; let loss = rewards.mul(&log_probs)?.neg()?.mean_all()?; optimizer.backward_step(&loss)?; } Ok(()) }
candle/candle-examples/examples/reinforcement-learning/policy_gradient.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/policy_gradient.rs", "repo_id": "candle", "token_count": 2331 }
37
use anyhow::{Ok, Result}; use candle_transformers::models::stable_diffusion::vae; pub fn build_sd3_vae_autoencoder(vb: candle_nn::VarBuilder) -> Result<vae::AutoEncoderKL> { let config = vae::AutoEncoderKLConfig { block_out_channels: vec![128, 256, 512, 512], layers_per_block: 2, latent_channels: 16, norm_num_groups: 32, use_quant_conv: false, use_post_quant_conv: false, }; Ok(vae::AutoEncoderKL::new(vb, 3, 3, config)?) } pub fn sd3_vae_vb_rename(name: &str) -> String { let parts: Vec<&str> = name.split('.').collect(); let mut result = Vec::new(); let mut i = 0; while i < parts.len() { match parts[i] { "down_blocks" => { result.push("down"); } "mid_block" => { result.push("mid"); } "up_blocks" => { result.push("up"); match parts[i + 1] { // Reverse the order of up_blocks. "0" => result.push("3"), "1" => result.push("2"), "2" => result.push("1"), "3" => result.push("0"), _ => {} } i += 1; // Skip the number after up_blocks. } "resnets" => { if i > 0 && parts[i - 1] == "mid_block" { match parts[i + 1] { "0" => result.push("block_1"), "1" => result.push("block_2"), _ => {} } i += 1; // Skip the number after resnets. } else { result.push("block"); } } "downsamplers" => { result.push("downsample"); i += 1; // Skip the 0 after downsamplers. } "conv_shortcut" => { result.push("nin_shortcut"); } "attentions" => { if parts[i + 1] == "0" { result.push("attn_1") } i += 1; // Skip the number after attentions. } "group_norm" => { result.push("norm"); } "query" => { result.push("q"); } "key" => { result.push("k"); } "value" => { result.push("v"); } "proj_attn" => { result.push("proj_out"); } "conv_norm_out" => { result.push("norm_out"); } "upsamplers" => { result.push("upsample"); i += 1; // Skip the 0 after upsamplers. } part => result.push(part), } i += 1; } result.join(".") }
candle/candle-examples/examples/stable-diffusion-3/vae.rs/0
{ "file_path": "candle/candle-examples/examples/stable-diffusion-3/vae.rs", "repo_id": "candle", "token_count": 1772 }
38
# candle-trocr `TrOCR` is a transformer OCR Model. In this example it is used to transcribe image text. See the associated [model card](https://huggingface.co/microsoft/trocr-base-printed) for details on the model itself. Supported models include: - `--which base`: small handwritten OCR model. - `--which large`: large handwritten OCR model. - `--which base-printed`: small printed OCR model. - `--which large-printed`: large printed OCR model. ## Running an example ```bash cargo run --example trocr --release -- --image candle-examples/examples/trocr/assets/trocr.png cargo run --example trocr --release -- --which large --image candle-examples/examples/trocr/assets/trocr.png cargo run --example trocr --release -- --which base-printed --image candle-examples/examples/trocr/assets/noto.png cargo run --example trocr --release -- --which large-printed --image candle-examples/examples/trocr/assets/noto.png ``` ### Outputs ``` industry , Mr. Brown commented icily . " Let us have a industry , " Mr. Brown commented icily . " Let us have a THE QUICK BROWN FOR JUMPS OVER THE LAY DOG THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG ```
candle/candle-examples/examples/trocr/readme.md/0
{ "file_path": "candle/candle-examples/examples/trocr/readme.md", "repo_id": "candle", "token_count": 360 }
39
// Build script to run nvcc and generate the C glue code for launching the flash-attention kernel. // The cuda build time is very long so one can set the CANDLE_FLASH_ATTN_BUILD_DIR environment // variable in order to cache the compiled artifacts and avoid recompiling too often. use anyhow::{Context, Result}; use std::path::PathBuf; const KERNEL_FILES: [&str; 33] = [ "kernels/flash_api.cu", "kernels/flash_fwd_hdim128_fp16_sm80.cu", "kernels/flash_fwd_hdim160_fp16_sm80.cu", "kernels/flash_fwd_hdim192_fp16_sm80.cu", "kernels/flash_fwd_hdim224_fp16_sm80.cu", "kernels/flash_fwd_hdim256_fp16_sm80.cu", "kernels/flash_fwd_hdim32_fp16_sm80.cu", "kernels/flash_fwd_hdim64_fp16_sm80.cu", "kernels/flash_fwd_hdim96_fp16_sm80.cu", "kernels/flash_fwd_hdim128_bf16_sm80.cu", "kernels/flash_fwd_hdim160_bf16_sm80.cu", "kernels/flash_fwd_hdim192_bf16_sm80.cu", "kernels/flash_fwd_hdim224_bf16_sm80.cu", "kernels/flash_fwd_hdim256_bf16_sm80.cu", "kernels/flash_fwd_hdim32_bf16_sm80.cu", "kernels/flash_fwd_hdim64_bf16_sm80.cu", "kernels/flash_fwd_hdim96_bf16_sm80.cu", "kernels/flash_fwd_hdim128_fp16_causal_sm80.cu", "kernels/flash_fwd_hdim160_fp16_causal_sm80.cu", "kernels/flash_fwd_hdim192_fp16_causal_sm80.cu", "kernels/flash_fwd_hdim224_fp16_causal_sm80.cu", "kernels/flash_fwd_hdim256_fp16_causal_sm80.cu", "kernels/flash_fwd_hdim32_fp16_causal_sm80.cu", "kernels/flash_fwd_hdim64_fp16_causal_sm80.cu", "kernels/flash_fwd_hdim96_fp16_causal_sm80.cu", "kernels/flash_fwd_hdim128_bf16_causal_sm80.cu", "kernels/flash_fwd_hdim160_bf16_causal_sm80.cu", "kernels/flash_fwd_hdim192_bf16_causal_sm80.cu", "kernels/flash_fwd_hdim224_bf16_causal_sm80.cu", "kernels/flash_fwd_hdim256_bf16_causal_sm80.cu", "kernels/flash_fwd_hdim32_bf16_causal_sm80.cu", "kernels/flash_fwd_hdim64_bf16_causal_sm80.cu", "kernels/flash_fwd_hdim96_bf16_causal_sm80.cu", ]; fn main() -> Result<()> { println!("cargo:rerun-if-changed=build.rs"); for kernel_file in KERNEL_FILES.iter() { println!("cargo:rerun-if-changed={kernel_file}"); } println!("cargo:rerun-if-changed=kernels/flash_fwd_kernel.h"); println!("cargo:rerun-if-changed=kernels/flash_fwd_launch_template.h"); println!("cargo:rerun-if-changed=kernels/flash.h"); println!("cargo:rerun-if-changed=kernels/philox.cuh"); println!("cargo:rerun-if-changed=kernels/softmax.h"); println!("cargo:rerun-if-changed=kernels/utils.h"); println!("cargo:rerun-if-changed=kernels/kernel_traits.h"); println!("cargo:rerun-if-changed=kernels/block_info.h"); println!("cargo:rerun-if-changed=kernels/static_switch.h"); println!("cargo:rerun-if-changed=kernels/hardware_info.h"); let out_dir = PathBuf::from(std::env::var("OUT_DIR").context("OUT_DIR not set")?); let build_dir = match std::env::var("CANDLE_FLASH_ATTN_BUILD_DIR") { Err(_) => { #[allow(clippy::redundant_clone)] out_dir.clone() } Ok(build_dir) => { let path = PathBuf::from(build_dir); path.canonicalize().expect(&format!( "Directory doesn't exists: {} (the current directory is {})", &path.display(), std::env::current_dir()?.display() )) } }; let kernels = KERNEL_FILES.iter().collect(); let mut builder = bindgen_cuda::Builder::default() .kernel_paths(kernels) .out_dir(build_dir.clone()) .arg("-std=c++17") .arg("-O3") .arg("-U__CUDA_NO_HALF_OPERATORS__") .arg("-U__CUDA_NO_HALF_CONVERSIONS__") .arg("-U__CUDA_NO_HALF2_OPERATORS__") .arg("-U__CUDA_NO_BFLOAT16_CONVERSIONS__") .arg("-Icutlass/include") .arg("--expt-relaxed-constexpr") .arg("--expt-extended-lambda") .arg("--use_fast_math") .arg("--verbose"); let mut is_target_msvc = false; if let Ok(target) = std::env::var("TARGET") { if target.contains("msvc") { is_target_msvc = true; builder = builder.arg("-D_USE_MATH_DEFINES"); } } if !is_target_msvc { builder = builder.arg("-Xcompiler").arg("-fPIC"); } let out_file = build_dir.join("libflashattention.a"); builder.build_lib(out_file); println!("cargo:rustc-link-search={}", build_dir.display()); println!("cargo:rustc-link-lib=flashattention"); println!("cargo:rustc-link-lib=dylib=cudart"); if !is_target_msvc { println!("cargo:rustc-link-lib=dylib=stdc++"); } Ok(()) }
candle/candle-flash-attn/build.rs/0
{ "file_path": "candle/candle-flash-attn/build.rs", "repo_id": "candle", "token_count": 2265 }
40
/****************************************************************************** * Copyright (c) 2024, Tri Dao. ******************************************************************************/ #pragma once #include <cute/tensor.hpp> #include "utils.h" //////////////////////////////////////////////////////////////////////////////////////////////////// namespace flash { using namespace cute; //////////////////////////////////////////////////////////////////////////////////////////////////// template <bool Is_even_K=true, bool Clear_OOB_K=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Engine2, typename Layout2, typename Engine3, typename Layout3> __forceinline__ __device__ void copy_rotary_interleaved(Tensor<Engine0, Layout0> const &S, Tensor<Engine1, Layout1> &D, Tensor<Engine2, Layout2> const &Cos, Tensor<Engine2, Layout2> const &Sin, Tensor<Engine3, Layout3> const &identity_MN, const int max_MN, const int min_MN, const int dim, const int rotary_dim) { CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos)); // MMA_K CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin)); // MMA_K CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin)); // MMA_K static_assert(decltype(size<0>(S))::value == decltype(size<0>(Cos))::value * 2); static_assert(decltype(size<0>(Cos))::value % 2 == 0); // Since we do fast conversion from fp16/bf16 to fp32 Tensor rCos = make_fragment_like(Cos); Tensor rSin = make_fragment_like(Sin); Tensor rS = make_fragment_like(S); #pragma unroll for (int m = 0; m < size<1>(S); ++m) { if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) { #pragma unroll for (int k = 0; k < size<2>(S); ++k) { if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) { cute::copy(S(_, m, k), rS(_, m, k)); if (get<1>(identity_MN(0, 0, k)) < rotary_dim) { cute::copy(Cos(_, m, k), rCos(_, m, k)); cute::copy(Sin(_, m, k), rSin(_, m, k)); Tensor S_fp32 = convert_type<float>(rS(_, m, k)); Tensor cos_fp32 = convert_type<float>(rCos(_, m, k)); Tensor sin_fp32 = convert_type<float>(rSin(_, m, k)); #pragma unroll for (int i = 0; i < size<0>(rS) / 2; ++i) { float real = S_fp32(2 * i) * cos_fp32(i) - S_fp32(2 * i + 1) * sin_fp32(i); float imag = S_fp32(2 * i) * sin_fp32(i) + S_fp32(2 * i + 1) * cos_fp32(i); S_fp32(2 * i) = real; S_fp32(2 * i + 1) = imag; } // Idk but I need to copy for the convert_type to work Tensor S_fp32_copy = make_fragment_like(S_fp32); cute::copy(S_fp32, S_fp32_copy); using T = typename Engine0::value_type; Tensor S_og_type = convert_type<T>(S_fp32_copy); cute::copy(S_og_type, rS(_, m, k)); } cute::copy(rS(_, m, k), D(_, m, k)); } else if (Clear_OOB_K) { cute::clear(D(_, m, k)); } } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// template <bool Is_even_K=true, bool Clear_OOB_K=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Engine2, typename Layout2, typename Engine3, typename Layout3> __forceinline__ __device__ void copy_rotary_contiguous(Tensor<Engine0, Layout0> const &S, Tensor<Engine1, Layout1> &D, Tensor<Engine2, Layout2> const &Cos, Tensor<Engine2, Layout2> const &Sin, Tensor<Engine3, Layout3> const &identity_MN, const int max_MN, const int min_MN, const int dim, const int rotary_dim) { CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Cos)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Cos)); // MMA_K CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(Sin)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(Sin)); // MMA_K CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(Cos)); // MMA CUTE_STATIC_ASSERT_V(size<0>(Cos) == size<0>(Sin)); static_assert(decltype(size<0>(Cos))::value % 2 == 0); // Since we do fast conversion from fp16/bf16 to fp32 Tensor rCos = make_fragment_like(Cos); Tensor rSin = make_fragment_like(Sin); Tensor rS = make_fragment_like(S); Tensor rS_other = make_fragment_like(rS(_, 0, 0)); #pragma unroll for (int m = 0; m < size<1>(S); ++m) { if (get<0>(identity_MN(0, m, 0)) >= min_MN && get<0>(identity_MN(0, m, 0)) < max_MN) { #pragma unroll for (int k = 0; k < size<2>(S); ++k) { if (Is_even_K || get<1>(identity_MN(0, 0, k)) < dim) { cute::copy(S(_, m, k), rS(_, m, k)); if (get<1>(identity_MN(0, 0, k)) < rotary_dim) { const bool is_left = get<1>(identity_MN(0, 0, k)) < rotary_dim / 2; Tensor gS_other = make_tensor(S(_, m, k).data() + (is_left ? rotary_dim / 2 : -rotary_dim / 2), S(_, m, k).layout()); cute::copy(gS_other, rS_other); // if (cute::thread0()) { print_tensor(rS(_, m, k)); print_tensor(rS_other); } Tensor gCos = make_tensor(Cos(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Cos(_, m, k).layout()); Tensor gSin = make_tensor(Sin(_, m, k).data() + (is_left ? 0 : -rotary_dim / 2), Sin(_, m, k).layout()); cute::copy(gCos, rCos(_, m, k)); cute::copy(gSin, rSin(_, m, k)); // if (cute::thread0()) { print_tensor(rCos(_, m, k)); print_tensor(rSin(_, m, k)); } Tensor S_fp32 = convert_type<float>(rS(_, m, k)); Tensor S_other_fp32 = convert_type<float>(rS_other); Tensor cos_fp32 = convert_type<float>(rCos(_, m, k)); Tensor sin_fp32 = convert_type<float>(rSin(_, m, k)); #pragma unroll for (int i = 0; i < size<0>(rS); ++i) { S_fp32(i) = S_fp32(i) * cos_fp32(i) + S_other_fp32(i) * (is_left ? -sin_fp32(i) : sin_fp32(i)); } // Idk but I need to copy for the convert_type to work Tensor S_fp32_copy = make_fragment_like(S_fp32); cute::copy(S_fp32, S_fp32_copy); using T = typename Engine0::value_type; Tensor S_og_type = convert_type<T>(S_fp32_copy); cute::copy(S_og_type, rS(_, m, k)); // if (cute::thread0()) { print_tensor(rS(_, m, k)); } } cute::copy(rS(_, m, k), D(_, m, k)); } else if (Clear_OOB_K) { cute::clear(D(_, m, k)); } } } } } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace flash
candle/candle-flash-attn/kernels/rotary.h/0
{ "file_path": "candle/candle-flash-attn/kernels/rotary.h", "repo_id": "candle", "token_count": 5052 }
41
#include "compatibility.cuh" #include<stdint.h> #include<cmath> // TODO: This is often used to check that the data is contiguous so that // kernels can be easily mapped. However this only returns true for row // major, if all the inputs are column major, we could apply the fast path // too (but we wouldn't if some of them are row major and some column major). __device__ bool is_contiguous( const size_t num_dims, const size_t *dims, const size_t *strides ) { size_t acc = 1; for (unsigned int d = 0; d < num_dims; d++) { unsigned int dim_idx = num_dims - 1 - d; if (dims[dim_idx] > 1 && acc != strides[dim_idx]) { return false; } acc *= dims[dim_idx]; } return true; } __device__ unsigned int get_strided_index( unsigned int idx, const size_t num_dims, const size_t *dims, const size_t *strides ) { unsigned int strided_i = 0; for (unsigned int d = 0; d < num_dims; d++) { unsigned int dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; } __device__ unsigned int restrided( const unsigned int strided_i, const size_t num_dims, const size_t *dims, const size_t *strides, const size_t *new_strides ) { unsigned int idx = 0; for (int d = 0; d < num_dims; d++) { idx += (strides[d] == 0 ? 0 : (strided_i / strides[d]) % dims[d]) * new_strides[d]; } return idx; } // Sourced from https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2 // Input must be less than or equal to 2 ^ 16 // used in reductions __device__ __forceinline__ unsigned int next_power_of_two(unsigned int v) { v--; v |= v >> 1; v |= v >> 2; v |= v >> 4; v |= v >> 8; v++; return v; } // Efficiently computes the sum of each chunk in "data" of size chunk_len, and // stores the sums in out[i / chunk_len] template<typename T> __device__ void chunk_sum( const size_t chunk_len, const T data, T* out ) { __shared__ T buf[1024]; // assumes that threads where i >= numel have already exited unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; unsigned int block_i = threadIdx.x; // Fall back to atomicAdd if chunk_len is small to reduce overhead if (chunk_len <= 2) { atomicAdd(out + i / chunk_len, data); return; } buf[block_i] = data; unsigned int chunk_i = i % chunk_len; unsigned int chunk_start = max((int)(block_i - chunk_i), 0); unsigned int chunk_end = min((unsigned int)(block_i + chunk_len - chunk_i), blockDim.x); chunk_i = block_i - chunk_start; size_t max_chunk_len = min(chunk_end - chunk_start, blockDim.x); size_t incr = next_power_of_two(max_chunk_len) >> 1; __syncthreads(); // Uses sequential addressing as discussed in // https://developer.download.nvidia.com/assets/cuda/files/reduction.pdf for (; incr > 0; incr >>= 1) { unsigned int block_i_2 = block_i + incr; if (block_i_2 < chunk_end && chunk_i < incr) { // This is sound because __syncthreads and the conditions above // ensure that no data races occur buf[block_i] += buf[block_i_2]; } __syncthreads(); } if (block_i == chunk_start) { atomicAdd(out + i / chunk_len, buf[block_i]); } } __device__ __forceinline__ bool isnang(float a) { return isnan(a); } __device__ __forceinline__ bool isnang(double a) { return isnan(a); } __device__ __forceinline__ float recipg(float a) { return 1.0 / a; } __device__ __forceinline__ double recipg(double a) { return 1.0 / a; } __device__ __forceinline__ float cosg(float a) { return cosf(a); } __device__ __forceinline__ double cosg(double a) { return cos(a); } __device__ __forceinline__ float sing(float a) { return sinf(a); } __device__ __forceinline__ double sing(double a) { return sin(a); } __device__ __forceinline__ float sqrtg(float a) { return sqrtf(a); } __device__ __forceinline__ double sqrtg(double a) { return sqrt(a); } __device__ __forceinline__ float powg(float a, float b) { return powf(a, b); } __device__ __forceinline__ double powg(double a, double b) { return pow(a, b); } __device__ __forceinline__ float tanhg(float a) { return tanhf(a); } __device__ __forceinline__ double tanhg(double a) { return tanh(a); } __device__ __forceinline__ float erfg(float a) { return erff(a); } __device__ __forceinline__ double erfg(double a) { return erf(a); } __device__ __forceinline__ float ceilg(float a) { return ceilf(a); } __device__ __forceinline__ double ceilg(double a) { return ceil(a); } __device__ __forceinline__ float floorg(float a) { return floorf(a); } __device__ __forceinline__ double floorg(double a) { return floor(a); } __device__ __forceinline__ float roundg(float a) { return roundf(a); } __device__ __forceinline__ double roundg(double a) { return round(a); } __device__ __forceinline__ float normcdfg(float a) { return normcdff(a); } __device__ __forceinline__ double normcdfg(double a) { return normcdf(a); } __device__ __forceinline__ float maxg(float a, float b) { return fmaxf(a, b); } __device__ __forceinline__ double maxg(double a, double b) { return fmax(a, b); } __device__ __forceinline__ float ming(float a, float b) { return fminf(a, b); } __device__ __forceinline__ double ming(double a, double b) { return fmin(a, b); } __device__ __forceinline__ float logg(float a) { return logf(a); } __device__ __forceinline__ double logg(double a) { return log(a); } __device__ __forceinline__ float expg(float a) { return expf(a); } __device__ __forceinline__ double expg(double a) { return exp(a); } __device__ __forceinline__ float absg(float a) { return fabsf(a); } __device__ __forceinline__ double absg(double a) { return fabs(a); } __device__ __forceinline__ float copysigng(float a, float b) { return copysignf(a, b); } __device__ __forceinline__ double copysigng(double a, double b) { return copysign(a, b); } __device__ __forceinline__ int64_t ming(int64_t a, int64_t b) { return min(a, b); } __device__ __forceinline__ int64_t maxg(int64_t a, int64_t b) { return max(a, b); } __device__ __forceinline__ uint32_t ming(uint32_t a, uint32_t b) { return min(a, b); } __device__ __forceinline__ uint32_t maxg(uint32_t a, uint32_t b) { return max(a, b); } __device__ __forceinline__ uint8_t ming(uint8_t a, uint8_t b) { return min(a, b); } __device__ __forceinline__ uint8_t maxg(uint8_t a, uint8_t b) { return max(a, b); } #if __CUDA_ARCH__ >= 530 __device__ __forceinline__ __half powg(__half a, __half b) { return __float2half(powf(__half2float(a), __half2float(b))); } __device__ __forceinline__ bool isnang(__half a) { return __hisnan(a); } __device__ __forceinline__ __half sqrtg(__half a) { return hsqrt(a); } __device__ __forceinline__ __half cosg(__half a) { return hcos(a); } __device__ __forceinline__ __half sing(__half a) { return hsin(a); } __device__ __forceinline__ __half recipg(__half a) { __half one = 1.0; return one / a; } __device__ __forceinline__ __half maxg(__half a, __half b) { return __hmax_nan(a, b); } __device__ __forceinline__ __half tanhg(__half a) { return __float2half(tanhf(__half2float(a))); } __device__ __forceinline__ __half erfg(__half a) { return __float2half(erff(__half2float(a))); } __device__ __forceinline__ __half ceilg(__half a) { return __float2half(ceilf(__half2float(a))); } __device__ __forceinline__ __half floorg(__half a) { return __float2half(floorf(__half2float(a))); } __device__ __forceinline__ __half roundg(__half a) { return __float2half(roundf(__half2float(a))); } __device__ __forceinline__ __half normcdfg(__half a) { return __float2half(normcdff(__half2float(a))); } __device__ __forceinline__ __half ming(__half a, __half b) { return __hmin_nan(a, b); } __device__ __forceinline__ __half logg(__half a) { return hlog(a); } __device__ __forceinline__ __half expg(__half a) { return hexp(a); } __device__ __forceinline__ __half absg(__half a) { return __habs(a); } __device__ __forceinline__ __half copysigng(__half a, __half b) { return __float2half(copysignf(__half2float(a), __half2float(b))); } #endif #if __CUDA_ARCH__ >= 800 __device__ __forceinline__ __nv_bfloat16 powg(__nv_bfloat16 a, __nv_bfloat16 b) { return __float2bfloat16(powf(__bfloat162float(a), __bfloat162float(b))); } __device__ __forceinline__ bool isnang(__nv_bfloat16 a) { return __hisnan(a); } __device__ __forceinline__ __nv_bfloat16 sqrtg(__nv_bfloat16 a) { return hsqrt(a); } __device__ __forceinline__ __nv_bfloat16 cosg(__nv_bfloat16 a) { return hcos(a); } __device__ __forceinline__ __nv_bfloat16 sing(__nv_bfloat16 a) { return hsin(a); } __device__ __forceinline__ __nv_bfloat16 recipg(__nv_bfloat16 a) { __nv_bfloat16 one = 1.0; return one / a; } __device__ __forceinline__ __nv_bfloat16 maxg(__nv_bfloat16 a, __nv_bfloat16 b) { return __hmax_nan(a, b); } __device__ __forceinline__ __nv_bfloat16 tanhg(__nv_bfloat16 a) { return __float2bfloat16(tanhf(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 erfg(__nv_bfloat16 a) { return __float2bfloat16(erff(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 ceilg(__nv_bfloat16 a) { return __float2bfloat16(ceilf(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 floorg(__nv_bfloat16 a) { return __float2bfloat16(floorf(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 roundg(__nv_bfloat16 a) { return __float2bfloat16(roundf(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 normcdfg(__nv_bfloat16 a) { return __float2bfloat16(normcdff(__bfloat162float(a))); } __device__ __forceinline__ __nv_bfloat16 ming(__nv_bfloat16 a, __nv_bfloat16 b) { return __hmin_nan(a, b); } __device__ __forceinline__ __nv_bfloat16 logg(__nv_bfloat16 a) { return hlog(a); } __device__ __forceinline__ __nv_bfloat16 expg(__nv_bfloat16 a) { return hexp(a); } __device__ __forceinline__ __nv_bfloat16 absg(__nv_bfloat16 a) { return __habs(a); } __device__ __forceinline__ __nv_bfloat16 copysigng(__nv_bfloat16 a, __nv_bfloat16 b) { return __float2bfloat16(copysignf(__bfloat162float(a), __bfloat162float(b))); } #define F8E4M3_TO_FLOAT(x) __half2float(__nv_cvt_fp8_to_halfraw(x.__x, __NV_E4M3)) __device__ __forceinline__ __nv_fp8_e4m3 powg(__nv_fp8_e4m3 a, __nv_fp8_e4m3 b) { return __nv_fp8_e4m3(powf(F8E4M3_TO_FLOAT(a), F8E4M3_TO_FLOAT(b))); } __device__ __forceinline__ bool isnang(__nv_fp8_e4m3 a) { return isnan(F8E4M3_TO_FLOAT(a)); } __device__ __forceinline__ __nv_fp8_e4m3 sqrtg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(sqrtf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 cosg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(cosf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 sing(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(sinf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 recipg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(1. / F8E4M3_TO_FLOAT(a)); } __device__ __forceinline__ __nv_fp8_e4m3 maxg(__nv_fp8_e4m3 a, __nv_fp8_e4m3 b) { return __nv_fp8_e4m3(fmaxf(F8E4M3_TO_FLOAT(a), F8E4M3_TO_FLOAT(b))); } __device__ __forceinline__ __nv_fp8_e4m3 tanhg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(tanhf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 erfg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(erff(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 ceilg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(ceilf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 floorg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(floorf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 roundg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(roundf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 normcdfg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(normcdff(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 ming(__nv_fp8_e4m3 a, __nv_fp8_e4m3 b) { return __nv_fp8_e4m3(fminf(F8E4M3_TO_FLOAT(a), F8E4M3_TO_FLOAT(b))); } __device__ __forceinline__ __nv_fp8_e4m3 logg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(logf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 expg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(expf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 absg(__nv_fp8_e4m3 a) { return __nv_fp8_e4m3(fabsf(F8E4M3_TO_FLOAT(a))); } __device__ __forceinline__ __nv_fp8_e4m3 copysigng(__nv_fp8_e4m3 a, __nv_fp8_e4m3 b) { return __nv_fp8_e4m3(copysignf(F8E4M3_TO_FLOAT(a), F8E4M3_TO_FLOAT(b))); } #endif
candle/candle-kernels/src/cuda_utils.cuh/0
{ "file_path": "candle/candle-kernels/src/cuda_utils.cuh", "repo_id": "candle", "token_count": 5289 }
42
#include <metal_stdlib> using namespace metal; #define MAX(x, y) ((x) > (y) ? (x) : (y)) template <typename T> METAL_FUNC void im2col( constant size_t &dst_numel, constant size_t &h_out, constant size_t &w_out, constant size_t &h_k, constant size_t &w_k, constant size_t &stride, constant size_t &padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // dst: (b_size, h_out, w_out, c_in, h_k, w_k) // src: (b_size, c_in, h_in, w_in) if (tid >= dst_numel) { return; } const size_t b_in = src_dims[0]; const size_t c_in = src_dims[1]; const size_t h_in = src_dims[2]; const size_t w_in = src_dims[3]; const size_t dst_s4 = w_k; const size_t dst_s3 = h_k * dst_s4; const size_t dst_s2 = c_in * dst_s3; const size_t dst_s1 = w_out * dst_s2; const size_t dst_s0 = h_out * dst_s1; size_t tmp_tid = tid; const size_t b_idx = tmp_tid / dst_s0; tmp_tid -= b_idx * dst_s0; const size_t h_idx = tmp_tid / dst_s1; tmp_tid -= h_idx * dst_s1; const size_t w_idx = tmp_tid / dst_s2; tmp_tid -= w_idx * dst_s2; const size_t c_idx = tmp_tid / dst_s3; tmp_tid -= c_idx * dst_s3; const size_t h_k_idx = tmp_tid / dst_s4; tmp_tid -= h_k_idx * dst_s4; const size_t w_k_idx = tmp_tid; size_t src_h_idx = h_idx * stride + h_k_idx * dilation; size_t src_w_idx = w_idx * stride + w_k_idx * dilation; if (src_h_idx < padding || src_h_idx >= h_in + padding) { dst[tid] = static_cast<T>(0); } else if (src_w_idx < padding || src_w_idx >= w_in + padding) { dst[tid] = static_cast<T>(0); } else { src_h_idx -= padding; src_w_idx -= padding; const size_t src_i = b_idx * src_strides[0] + c_idx * src_strides[1] + src_h_idx * src_strides[2] + src_w_idx * src_strides[3]; dst[tid] = src[src_i]; } } template <typename T> METAL_FUNC void col2im1d( constant size_t &dst_el, constant size_t &l_out, constant size_t &l_in, constant size_t &c_out, constant size_t &k_size, constant size_t &stride, device const T *src, device T *dst, uint dst_i [[ thread_position_in_grid ]] ) { // src: (b_size, l_in, c_out, l_k) // dst: (b_size, c_out, l_out) if (dst_i >= dst_el) { return; } const size_t dst_s0 = c_out * l_out; const size_t dst_s1 = l_out; const size_t src_s0 = c_out * k_size * l_in; const size_t src_s1 = c_out * k_size; const size_t src_s2 = k_size; size_t tmp_dst_i = dst_i; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t c_idx = tmp_dst_i / dst_s1; tmp_dst_i -= c_idx * dst_s1; const int l_out_idx = tmp_dst_i; dst[dst_i] = static_cast<T>(0); int l_in_idx = l_out_idx / stride; int k0 = l_out_idx - l_in_idx * stride; // l_out_idx = l_in_idx * stride + k0 for (; k0 < k_size && l_in_idx >= 0; k0 += stride, --l_in_idx) { if (l_in_idx < l_in) { const size_t src_i = b_idx * src_s0 + l_in_idx * src_s1 + c_idx * src_s2 + k0; dst[dst_i] += src[src_i]; } } } template <typename T> METAL_FUNC void im2col1d( constant size_t &dst_numel, constant size_t &l_out, constant size_t &l_k, constant size_t &stride, constant size_t &padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // dst: (b_size, l_out, c_in, l_k) // src: (b_size, c_in, l_in) if (tid >= dst_numel) { return; } const size_t b_in = src_dims[0]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; const size_t dst_s2 = l_k; const size_t dst_s1 = c_in * dst_s2; const size_t dst_s0 = l_out * dst_s1; size_t tmp_dst_i = tid; const size_t b_idx = tmp_dst_i / dst_s0; tmp_dst_i -= b_idx * dst_s0; const size_t l_idx = tmp_dst_i / dst_s1; tmp_dst_i -= l_idx * dst_s1; const size_t c_idx = tmp_dst_i / dst_s2; tmp_dst_i -= c_idx * dst_s2; const size_t l_k_idx = tmp_dst_i; size_t src_l_idx = l_idx * stride + l_k_idx * dilation; if (src_l_idx < padding || src_l_idx >= l_in + padding) { dst[tid] = static_cast<T>(0); } else { src_l_idx -= padding; const size_t src_i = b_idx * src_strides[0] + c_idx * src_strides[1] + src_l_idx * src_strides[2]; dst[tid] = src[src_i]; } } template <typename T> METAL_FUNC void upsample_nearest2d( constant size_t &w_out, constant size_t &h_out, constant float &w_scale, constant float &h_scale, constant size_t *src_dims, constant size_t *src_s, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // src: (b_size, c_in, w_in, h_in) const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; if (tid >= src_dims[0] * c * w_out * h_out) { return; } // TODO: Improve this. const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; size_t src_w = static_cast<size_t>(dst_w * w_scale); size_t src_h = static_cast<size_t>(dst_h * h_scale); if (src_w >= w_in) { src_w = w_in - 1; } if (src_h >= h_in) { src_h = h_in - 1; } const size_t src_i = b_idx * src_s[0] + c_idx * src_s[1] + src_w * src_s[2] + src_h * src_s[3]; dst[tid] = src[src_i]; } #define IM2COL_OP(T, FN_NAME) \ kernel void FN_NAME( \ constant size_t &dst_numel, \ constant size_t &h_out, \ constant size_t &w_out, \ constant size_t &h_k, \ constant size_t &w_k, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ device const T *src, \ device T *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ im2col<T>(dst_numel, h_out, w_out, h_k, w_k, stride, padding, dilation, src_dims, src_strides, src, dst, tid); \ } \ #define IM2COL1D_OP(T, FN_NAME) \ kernel void FN_NAME( \ constant size_t &dst_numel, \ constant size_t &l_out, \ constant size_t &l_k, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ device const T *src, \ device T *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ im2col1d<T>(dst_numel, l_out, l_k, stride, padding, dilation, src_dims, src_strides, src, dst, tid); \ } \ #define COL2IM1D_OP(T, FN_NAME) \ kernel void FN_NAME( \ constant size_t &dst_el, \ constant size_t &l_out, \ constant size_t &l_in, \ constant size_t &c_out, \ constant size_t &k_size, \ constant size_t &stride, \ device const T *src, \ device T *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ col2im1d<T>(dst_el, l_out, l_in, c_out, k_size, stride, src, dst, tid); \ } \ #define UPSAMPLE_NEAREST2D_OP(TYPENAME, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_out, \ constant size_t &h_out, \ constant float &w_scale, \ constant float &h_scale, \ constant size_t *dims, \ constant size_t *strides, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ upsample_nearest2d<TYPENAME>(w_out, h_out, w_scale, h_scale, dims, strides, src, dst, tid); \ } \ template <typename T, typename A> METAL_FUNC void avg_pool2d( constant size_t &w_k, constant size_t &h_k, constant size_t &w_stride, constant size_t &h_stride, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (tid >= src_dims[0] * c * w_out * h_out) { return; } const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; const size_t src_idx0 = b_idx * src_strides[0]; A d = 0; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in){ continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_strides[1] + src_w * src_strides[2] + src_h * src_strides[3]; d += static_cast<A>(src[src_idx]); } } dst[tid] = static_cast<T>(d / (w_k * h_k)); } #define AVGPOOL2D_OP(TYPENAME, TYPEACC, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_k, \ constant size_t &h_k, \ constant size_t &w_s, \ constant size_t &h_s, \ constant size_t *src_dims, \ constant size_t *src_s, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ avg_pool2d<TYPENAME, TYPEACC>(w_k, h_k, w_s, h_s, src_dims, src_s, src, dst, tid); \ } \ template <typename T> METAL_FUNC void max_pool2d( constant size_t &w_k, constant size_t &h_k, constant size_t &w_stride, constant size_t &h_stride, constant size_t *src_dims, constant size_t *src_strides, device const T *src, device T *dst, uint tid [[ thread_position_in_grid ]] ) { const size_t c = src_dims[1]; const size_t w_in = src_dims[2]; const size_t h_in = src_dims[3]; const size_t w_out = (w_in - w_k) / w_stride + 1; const size_t h_out = (h_in - h_k) / h_stride + 1; if (tid >= src_dims[0] * c * w_out * h_out) { return; } const size_t b_idx = tid / (w_out * h_out * c); const size_t c_idx = (tid / (w_out * h_out)) % c; const size_t dst_w = (tid / h_out) % w_out; const size_t dst_h = tid % h_out; const size_t src_idx0 = b_idx * src_strides[0]; T d = 0; bool set = false; for (size_t w_offset = 0; w_offset < w_k; ++w_offset) { size_t src_w = w_stride * dst_w + w_offset; if (src_w >= w_in){ continue; } for (size_t h_offset = 0; h_offset < h_k; ++h_offset) { size_t src_h = h_stride * dst_h + h_offset; if (src_h >= h_in) { continue; } const size_t src_idx = src_idx0 + c_idx * src_strides[1] + src_w * src_strides[2] + src_h * src_strides[3]; if (set) { d = MAX(d, src[src_idx]); } else { d = src[src_idx]; set = true; } } } dst[tid] = d; } #define MAXPOOL2D_OP(TYPENAME, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_k, \ constant size_t &h_k, \ constant size_t &w_s, \ constant size_t &h_s, \ constant size_t *src_dims, \ constant size_t *src_s, \ device const TYPENAME *src, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ max_pool2d<TYPENAME>(w_k, h_k, w_s, h_s, src_dims, src_s, src, dst, tid); \ } \ // Naive implementation of conv_transpose1d. template <typename T, typename A> METAL_FUNC void conv_transpose1d( constant size_t &l_out, constant size_t &stride, constant size_t &padding, constant size_t &out_padding, constant size_t &dilation, constant size_t *src_dims, constant size_t *src_strides, constant size_t *k_dims, constant size_t *k_strides, device const T *src, device const T *k, device T *dst, uint tid [[ thread_position_in_grid ]] ) { // src: (b_size, c_in, l_in) // kernel: (c_in, c_out, l_k) const size_t l_k = k_dims[2]; const size_t c_out = k_dims[1]; const size_t c_in = src_dims[1]; const size_t l_in = src_dims[2]; if (tid >= src_dims[0] * c_out * l_out) { return; } const size_t b_idx = tid / (l_out * c_out); const size_t dst_c_idx = (tid / l_out) % c_out; const size_t out_x = tid % l_out; const size_t src_idx0 = b_idx * src_strides[0]; A d = 0; for (int k_x = 0; k_x < (int)l_k; ++k_x) { // let out_x = inp_x * p.stride + k_x * p.dilation - p.padding; int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } int inp_x = inp_x_stride / stride; if (inp_x >= l_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * src_strides[1] + inp_x * src_strides[2]; const size_t k_idx = src_c_idx * k_strides[0] + dst_c_idx * k_strides[1] + k_x * k_strides[2]; d += static_cast<A>(src[src_idx]) * static_cast<A>(k[k_idx]); } } dst[tid] = static_cast<T>(d); } #define CONVT1D_OP(TYPENAME, TYPEACC, FN_NAME) \ kernel void FN_NAME( \ constant size_t &l_out, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &out_padding, \ constant size_t &dilation, \ constant size_t *src_dims, \ constant size_t *src_strides, \ constant size_t *k_dims, \ constant size_t *k_strides, \ device const TYPENAME *src, \ device const TYPENAME *k, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ conv_transpose1d<TYPENAME, TYPEACC>(l_out, stride, padding, out_padding, dilation, src_dims, src_strides, k_dims, k_strides, src, k, dst, tid); \ } \ template <typename T, typename A> METAL_FUNC void conv_transpose2d( constant size_t &w_out, constant size_t &h_out, constant size_t &stride, constant size_t &padding, constant size_t &out_padding, constant size_t &dilation, constant size_t *input_dims, constant size_t *input_stride, constant size_t *k_dims, constant size_t *k_stride, device const T *src, device const T *k, device T *dst, uint tid [[ thread_position_in_grid ]] ) { const size_t h_k = k_dims[2]; const size_t w_k = k_dims[3]; const size_t c_out = k_dims[1]; const size_t c_in = input_dims[1]; const size_t h_in = input_dims[2]; const size_t w_in = input_dims[3]; if (tid >= input_dims[0] * c_out * w_out * h_out) { return; } const size_t b_idx = tid / (w_out * h_out * c_out); const size_t dst_c_idx = (tid / (w_out * h_out)) % c_out; const size_t out_y = (tid / w_out) % h_out; const size_t out_x = tid % w_out; const size_t src_idx0 = b_idx * input_stride[0]; A d = 0; for (int k_x = 0; k_x < (int)w_k; ++k_x) { const int inp_x_stride = (int)(out_x + padding) - k_x * dilation; if (inp_x_stride < 0 || inp_x_stride % stride) { continue; } const int inp_x = inp_x_stride / stride; if (inp_x >= w_in) continue; for (int k_y = 0; k_y < (int)h_k; ++k_y) { const int inp_y_stride = (int)(out_y + padding) - k_y * dilation; if (inp_y_stride < 0 || inp_y_stride % stride) { continue; } const int inp_y = inp_y_stride / stride; if (inp_y >= h_in) continue; for (size_t src_c_idx = 0; src_c_idx < c_in; ++src_c_idx) { const size_t src_idx = src_idx0 + src_c_idx * input_stride[1] + inp_y * input_stride[2] + inp_x * input_stride[3]; const size_t k_idx = src_c_idx * k_stride[0] + dst_c_idx * k_stride[1] + k_y * k_stride[2] + k_x * k_stride[3]; d += static_cast<A>(src[src_idx]) * static_cast<A>(k[k_idx]); } } } dst[tid] = static_cast<T>(d); } #define CONVT2D_OP(TYPENAME, TYPEACC, FN_NAME) \ kernel void FN_NAME( \ constant size_t &w_out, \ constant size_t &h_out, \ constant size_t &stride, \ constant size_t &padding, \ constant size_t &out_padding, \ constant size_t &dilation, \ constant size_t *input_dims, \ constant size_t *input_stride, \ constant size_t *k_dims, \ constant size_t *k_stride, \ device const TYPENAME *src, \ device const TYPENAME *k, \ device TYPENAME *dst, \ uint tid [[ thread_position_in_grid ]] \ ) { \ conv_transpose2d<TYPENAME, TYPEACC>(w_out, h_out, stride, padding, out_padding, dilation, input_dims, input_stride, k_dims, k_stride, src, k, dst, tid); \ } \ IM2COL_OP(float, im2col_f32) IM2COL_OP(half, im2col_f16) IM2COL_OP(uint8_t, im2col_u8) IM2COL_OP(uint32_t, im2col_u32) #if defined(__HAVE_BFLOAT__) IM2COL_OP(bfloat, im2col_bf16) #endif COL2IM1D_OP(float, col2im1d_f32) COL2IM1D_OP(uint8_t, col2im1d_u8) COL2IM1D_OP(uint32_t, col2im1d_u32) IM2COL1D_OP(float, im2col1d_f32) IM2COL1D_OP(uint8_t, im2col1d_u8) IM2COL1D_OP(uint32_t, im2col1d_u32) UPSAMPLE_NEAREST2D_OP(float, upsample_nearest2d_f32) UPSAMPLE_NEAREST2D_OP(half, upsample_nearest2d_f16) UPSAMPLE_NEAREST2D_OP(uint8_t, upsample_nearest2d_u8) UPSAMPLE_NEAREST2D_OP(uint32_t, upsample_nearest2d_u32) #if defined(__HAVE_BFLOAT__) UPSAMPLE_NEAREST2D_OP(bfloat, upsample_nearest2d_bf16) #endif MAXPOOL2D_OP(float, max_pool2d_f32) MAXPOOL2D_OP(half, max_pool2d_f16) MAXPOOL2D_OP(uint32_t, max_pool2d_u32) MAXPOOL2D_OP(uint8_t, max_pool2d_u8) #if defined(__HAVE_BFLOAT__) MAXPOOL2D_OP(bfloat, max_pool2d_bf16) #endif AVGPOOL2D_OP(float, float, avg_pool2d_f32) AVGPOOL2D_OP(half, float, avg_pool2d_f16) AVGPOOL2D_OP(uint32_t, uint32_t, avg_pool2d_u32) AVGPOOL2D_OP(uint8_t, uint8_t, avg_pool2d_u8) #if defined(__HAVE_BFLOAT__) AVGPOOL2D_OP(bfloat, float, avg_pool2d_bf16) #endif CONVT1D_OP(float, float, conv_transpose1d_f32) CONVT1D_OP(half, float, conv_transpose1d_f16) CONVT1D_OP(uint8_t, uint8_t, conv_transpose1d_u8) CONVT1D_OP(uint32_t, uint32_t, conv_transpose1d_u32) #if defined(__HAVE_BFLOAT__) CONVT1D_OP(bfloat, float, conv_transpose1d_bf16) #endif CONVT2D_OP(float, float, conv_transpose2d_f32) CONVT2D_OP(half, float, conv_transpose2d_f16) #if defined(__HAVE_BFLOAT__) CONVT1D_OP(bfloat, float, conv_transpose2d_bf16) #endif
candle/candle-metal-kernels/src/conv.metal/0
{ "file_path": "candle/candle-metal-kernels/src/conv.metal", "repo_id": "candle", "token_count": 8944 }
43
#pragma once #include <metal_stdlib> using namespace metal; METAL_FUNC uint nonzero(uint n) { return n == 0 ? 1 : n; } template<uint N> constexpr uint nonzero() { return N == 0 ? 1 : N; } template<typename T> constexpr ushort granularity() { return nonzero<vec_elements<T>::value>(); } METAL_FUNC uint next_p2(uint x) { return 1 << (32 - clz(x - 1)); } METAL_FUNC uint prev_p2(uint x) { return 1 << (31 - clz(x)); } constant uint MAX_SHARED_MEM = 32767; template<typename T> METAL_FUNC uint max_shared_mem(uint n) { return min(n, prev_p2(MAX_SHARED_MEM / sizeof(T))); } METAL_FUNC uint get_strided_index( uint idx, constant const uint &num_dims, constant const size_t *dims, constant const size_t *strides ) { uint strided_i = 0; for (uint d = 0; d < num_dims; d++) { uint dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; }
candle/candle-metal-kernels/src/utils.metal/0
{ "file_path": "candle/candle-metal-kernels/src/utils.metal", "repo_id": "candle", "token_count": 453 }
44
//! Batch Normalization. //! //! This layer applies Batch Normalization over a mini-batch of inputs as described in [`Batch //! Normalization`]. The input is expected to have at least three dimensions. //! //! Note that this implementation is for inference only, there is no possibility to track the //! running stats. //! //! [`Batch Normalization`]: https://arxiv.org/abs/1502.03167 use candle::{DType, Result, Tensor, Var}; #[derive(Debug, Clone, Copy, PartialEq)] pub struct BatchNormConfig { pub eps: f64, pub remove_mean: bool, /// The meaning of affine here is different from LayerNorm: when false there is no learnable /// parameter at all, 1 used for gamma and 0 for beta. pub affine: bool, /// Controls exponential moving average of running stats. Defaults to 0.1 /// /// `running_stat * (1.0 - momentum) + stat * momentum`. pub momentum: f64, } impl Default for BatchNormConfig { fn default() -> Self { Self { eps: 1e-5, remove_mean: true, affine: true, momentum: 0.1, } } } impl From<f64> for BatchNormConfig { fn from(eps: f64) -> Self { Self { eps, ..Default::default() } } } #[derive(Clone, Debug)] pub struct BatchNorm { running_mean: Var, running_var: Var, weight_and_bias: Option<(Tensor, Tensor)>, remove_mean: bool, eps: f64, momentum: f64, } impl BatchNorm { fn check_validity(&self, num_features: usize) -> Result<()> { if self.eps < 0. { candle::bail!("batch-norm eps cannot be negative {}", self.eps) } if !(0.0..=1.0).contains(&self.momentum) { candle::bail!( "batch-norm momentum must be between 0 and 1, is {}", self.momentum ) } if self.running_mean.dims() != [num_features] { candle::bail!( "batch-norm running mean has unexpected shape {:?} should have shape [{num_features}]", self.running_mean.shape(), ) } if self.running_var.dims() != [num_features] { candle::bail!( "batch-norm running variance has unexpected shape {:?} should have shape [{num_features}]", self.running_var.shape(), ) } if let Some((ref weight, ref bias)) = self.weight_and_bias.as_ref() { if weight.dims() != [num_features] { candle::bail!( "batch-norm weight has unexpected shape {:?} should have shape [{num_features}]", weight.shape(), ) } if bias.dims() != [num_features] { candle::bail!( "batch-norm weight has unexpected shape {:?} should have shape [{num_features}]", bias.shape(), ) } } Ok(()) } pub fn new( num_features: usize, running_mean: Tensor, running_var: Tensor, weight: Tensor, bias: Tensor, eps: f64, ) -> Result<Self> { let out = Self { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias: Some((weight, bias)), remove_mean: true, eps, momentum: 0.1, }; out.check_validity(num_features)?; Ok(out) } pub fn new_no_bias( num_features: usize, running_mean: Tensor, running_var: Tensor, eps: f64, ) -> Result<Self> { let out = Self { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias: None, remove_mean: true, eps, momentum: 0.1, }; out.check_validity(num_features)?; Ok(out) } pub fn new_with_momentum( num_features: usize, running_mean: Tensor, running_var: Tensor, weight: Tensor, bias: Tensor, eps: f64, momentum: f64, ) -> Result<Self> { let out = Self { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias: Some((weight, bias)), remove_mean: true, eps, momentum, }; out.check_validity(num_features)?; Ok(out) } pub fn new_no_bias_with_momentum( num_features: usize, running_mean: Tensor, running_var: Tensor, eps: f64, momentum: f64, ) -> Result<Self> { let out = Self { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias: None, remove_mean: true, eps, momentum, }; out.check_validity(num_features)?; Ok(out) } pub fn running_mean(&self) -> &Tensor { self.running_mean.as_tensor() } pub fn running_var(&self) -> &Tensor { self.running_var.as_tensor() } pub fn eps(&self) -> f64 { self.eps } pub fn weight_and_bias(&self) -> Option<(&Tensor, &Tensor)> { self.weight_and_bias.as_ref().map(|v| (&v.0, &v.1)) } pub fn momentum(&self) -> f64 { self.momentum } pub fn forward_train(&self, x: &Tensor) -> Result<Tensor> { let num_features = self.running_mean.as_tensor().dim(0)?; let x_dtype = x.dtype(); let internal_dtype = match x_dtype { DType::F16 | DType::BF16 => DType::F32, d => d, }; if x.rank() < 2 { candle::bail!( "batch-norm input tensor must have at least two dimensions ({:?})", x.shape() ) } if x.dim(1)? != num_features { candle::bail!( "batch-norm input doesn't have the expected number of features ({:?} <> {})", x.shape(), num_features ) } let x = x.to_dtype(internal_dtype)?; let x = x.transpose(0, 1)?; let x_dims_post_transpose = x.dims(); // Flatten all the dimensions exception the channel one as this performs a Spatial Batch // Normalization. let x = x.flatten_from(1)?.contiguous()?; let x = if self.remove_mean { // The mean is taken over dim 1 as this is the batch dim after the transpose(0, 1) above. let mean_x = x.mean_keepdim(1)?; let updated_running_mean = ((self.running_mean.as_tensor() * (1.0 - self.momentum))? + (mean_x.flatten_all()? * self.momentum)?)?; self.running_mean.set(&updated_running_mean)?; x.broadcast_sub(&mean_x)? } else { x }; // The mean is taken over dim 1 as this is the batch dim after the transpose(0, 1) above. let norm_x = x.sqr()?.mean_keepdim(1)?; let updated_running_var = { let batch_size = x.dim(1)? as f64; let running_var_weight = 1.0 - self.momentum; let norm_x_weight = self.momentum * batch_size / (batch_size - 1.0); ((self.running_var.as_tensor() * running_var_weight)? + (&norm_x.flatten_all()? * norm_x_weight)?)? }; self.running_var.set(&updated_running_var)?; let x = x .broadcast_div(&(norm_x + self.eps)?.sqrt()?)? .to_dtype(x_dtype)?; let x = match &self.weight_and_bias { None => x, Some((weight, bias)) => { let weight = weight.reshape(((), 1))?; let bias = bias.reshape(((), 1))?; x.broadcast_mul(&weight)?.broadcast_add(&bias)? } }; x.reshape(x_dims_post_transpose)?.transpose(0, 1) } fn forward_eval(&self, x: &Tensor) -> Result<Tensor> { let target_shape: Vec<usize> = x .dims() .iter() .enumerate() .map(|(idx, v)| if idx == 1 { *v } else { 1 }) .collect(); let target_shape = target_shape.as_slice(); let x = x .broadcast_sub( &self .running_mean .as_detached_tensor() .reshape(target_shape)?, )? .broadcast_div( &(self .running_var .as_detached_tensor() .reshape(target_shape)? + self.eps)? .sqrt()?, )?; match &self.weight_and_bias { None => Ok(x), Some((weight, bias)) => { let weight = weight.reshape(target_shape)?; let bias = bias.reshape(target_shape)?; x.broadcast_mul(&weight)?.broadcast_add(&bias) } } } } impl crate::ModuleT for BatchNorm { fn forward_t(&self, x: &Tensor, train: bool) -> Result<Tensor> { if train { self.forward_train(x) } else { self.forward_eval(x) } } } pub fn batch_norm<C: Into<BatchNormConfig>>( num_features: usize, config: C, vb: crate::VarBuilder, ) -> Result<BatchNorm> { use crate::Init; let config = config.into(); if config.eps < 0. { candle::bail!("batch-norm eps cannot be negative {}", config.eps) } let running_mean = vb.get_with_hints(num_features, "running_mean", Init::Const(0.))?; let running_var = vb.get_with_hints(num_features, "running_var", Init::Const(1.))?; let weight_and_bias = if config.affine { let weight = vb.get_with_hints(num_features, "weight", Init::Const(1.))?; let bias = vb.get_with_hints(num_features, "bias", Init::Const(0.))?; Some((weight, bias)) } else { None }; Ok(BatchNorm { running_mean: Var::from_tensor(&running_mean)?, running_var: Var::from_tensor(&running_var)?, weight_and_bias, remove_mean: config.remove_mean, eps: config.eps, momentum: config.momentum, }) }
candle/candle-nn/src/batch_norm.rs/0
{ "file_path": "candle/candle-nn/src/batch_norm.rs", "repo_id": "candle", "token_count": 5325 }
45
use candle::{Result, Tensor}; /// Sample according to the Gumbel-Softmax distribution. pub fn gumbel_softmax<D: candle::shape::Dim>( logits: &Tensor, temperature: f64, dim: D, ) -> Result<Tensor> { if temperature <= 0.0 { logits.argmax(dim) } else { // Cast to f32, doing the Gumbel softmax in bf16 is a bit unstable. let logits = logits.to_dtype(candle::DType::F32)?; let minus_g = logits.rand_like(1e-7, 0.999)?.log()?.neg()?.log()?; if temperature == 1.0 { let sampled = (logits - minus_g)?.argmax(dim)?; Ok(sampled) } else { let sampled = (logits + minus_g * (-temperature))?.argmax(dim)?; Ok(sampled) } } }
candle/candle-nn/src/sampling.rs/0
{ "file_path": "candle/candle-nn/src/sampling.rs", "repo_id": "candle", "token_count": 357 }
46
use std::io::Result; fn main() -> Result<()> { prost_build::compile_protos(&["src/onnx.proto3"], &["src/"])?; Ok(()) }
candle/candle-onnx/build.rs/0
{ "file_path": "candle/candle-onnx/build.rs", "repo_id": "candle", "token_count": 60 }
47
from dataclasses import dataclass from typing import Optional from candle.nn import Module, Embedding, LayerNorm, Linear, ModuleList from candle import Tensor import candle import candle.functional as F from typing import Tuple, Optional @dataclass class Config: vocab_size: int = 30522 hidden_size: int = 768 num_hidden_layers: int = 12 num_attention_heads: int = 12 intermediate_size: int = 3072 hidden_act: str = "gelu" hidden_dropout_prob: float = 0.1 max_position_embeddings: int = 512 type_vocab_size: int = 2 initializer_range: float = 0.02 layer_norm_eps: float = 1e-12 pad_token_id: int = 0 position_embedding_type: str = "absolute" use_cache: bool = True classifier_dropout: Optional[float] = None model_type: Optional[str] = "bert" class BertSelfAttention(Module): def __init__(self, config: Config) -> None: super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / self.num_attention_heads) all_head_size = int(config.num_attention_heads * self.attention_head_size) hidden_size = config.hidden_size self.query = Linear(hidden_size, all_head_size) self.key = Linear(hidden_size, all_head_size) self.value = Linear(hidden_size, all_head_size) def transpose_for_scores(self, x: Tensor) -> Tensor: new_x_shape = x.shape[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.reshape(new_x_shape).transpose(1, 2) return x.contiguous() def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: query = self.query.forward(hidden_states) key = self.key.forward(hidden_states) value = self.value.forward(hidden_states) query = self.transpose_for_scores(query) key = self.transpose_for_scores(key) value = self.transpose_for_scores(value) attention_scores = query.matmul(key.t()) attention_scores = attention_scores / float(self.attention_head_size) ** 0.5 if attention_mask is not None: b_size, _, _, last_dim = attention_scores.shape attention_scores = attention_scores.broadcast_add(attention_mask.reshape((b_size, 1, 1, last_dim))) attention_probs = F.softmax(attention_scores, dim=-1) context_layer = attention_probs.matmul(value) context_layer = context_layer.transpose(1, 2).contiguous() context_layer = context_layer.flatten_from(-2) return context_layer class BertSelfOutput(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.LayerNorm.forward(hidden_states + input_tensor) class BertAttention(Module): def __init__(self, config: Config) -> None: super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, hidden_states: Tensor, attention_mask: None) -> Tensor: self_outputs = self.self.forward(hidden_states, attention_mask=attention_mask) attention_output = self.output.forward(self_outputs, hidden_states) return attention_output class BertIntermediate(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.intermediate_size) self.act = F.gelu if config.hidden_act == "gelu" else F.relu def forward(self, hidden_states: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.act(hidden_states) class BertOutput(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: Tensor, input_tensor: Tensor) -> Tensor: hidden_states = self.dense.forward(hidden_states) return self.LayerNorm.forward(hidden_states + input_tensor) class BertLayer(Module): def __init__(self, config: Config) -> None: super().__init__() self.attention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: attention_output = self.attention.forward(hidden_states, attention_mask=attention_mask) # TODO: Support cross-attention? # https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L523 # TODO: Support something similar to `apply_chunking_to_forward`? intermediate_output = self.intermediate.forward(attention_output) layer_output = self.output.forward(intermediate_output, attention_output) return layer_output class BertEncoder(Module): def __init__(self, config: Config) -> None: super().__init__() self.layer = ModuleList() for _ in range(config.num_hidden_layers): self.layer.append(BertLayer(config)) def forward(self, hidden_states: Tensor, attention_mask=None) -> Tensor: for l in self.layer: hidden_states = l.forward(hidden_states, attention_mask=attention_mask) return hidden_states class BertEmbeddings(Module): def __init__(self, config: Config) -> None: super().__init__() self.word_embeddings = Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = Embedding(config.type_vocab_size, config.hidden_size) self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.position_ids = candle.Tensor(list(range(config.max_position_embeddings))).reshape( (1, config.max_position_embeddings) ) def forward(self, input_ids: Tensor, token_type_ids: Tensor) -> Tensor: (_batch_size, seq_len) = input_ids.shape input_embeddings = self.word_embeddings.forward(input_ids) token_type_embeddings = self.token_type_embeddings.forward(token_type_ids) embeddings: Tensor = input_embeddings + token_type_embeddings position_ids = list(range(seq_len)) position_ids = Tensor(position_ids).to_dtype(input_ids.dtype).to_device(input_ids.device) embeddings = embeddings.broadcast_add(self.position_embeddings.forward(position_ids)) embeddings = self.LayerNorm(embeddings) return embeddings class BertPooler(Module): def __init__(self, config: Config) -> None: super().__init__() self.dense = Linear(config.hidden_size, config.hidden_size) self.activation = F.tanh def forward(self, hidden_states: Tensor) -> Tensor: first_token_tensor = hidden_states[:, 0] pooled_output = self.dense.forward(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output def masked_fill(on_false: float, mask: Tensor, on_true: float): shape = mask.shape on_true = candle.tensor(on_true).broadcast_as(shape) on_false = candle.tensor(on_false).broadcast_as(shape) return mask.where_cond(on_true, on_false) # https://github.com/huggingface/transformers/blob/6eedfa6dd15dc1e22a55ae036f681914e5a0d9a1/src/transformers/models/bert/modeling_bert.py#L874 class BertModel(Module): def __init__(self, config: Config, add_pooling_layer=True) -> None: super().__init__() self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None def forward( self, input_ids: Tensor, token_type_ids: Tensor, attention_mask=None ) -> Tuple[Tensor, Optional[Tensor]]: if attention_mask is not None: # Replace 0s with -inf, and 1s with 0s. attention_mask = masked_fill(float("-inf"), attention_mask, 1.0) embeddings = self.embeddings.forward(input_ids, token_type_ids) encoder_out = self.encoder.forward(embeddings, attention_mask=attention_mask) pooled_output = self.pooler(encoder_out) if self.pooler is not None else None return encoder_out, pooled_output
candle/candle-pyo3/py_src/candle/models/bert.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/models/bert.py", "repo_id": "candle", "token_count": 3528 }
48
# This example shows how the candle Python api can be used to replicate llama.cpp. import sys from typing import Dict, Tuple, Any import candle from candle.models.llama import QuantizedLlama from candle import utils MAX_SEQ_LEN = 4096 def gguf_rename(tensor_name: str): if tensor_name == "token_embd.weight": return "tok_embeddings.weight" if tensor_name == "output_norm.weight": return "norm.weight" tensor_name = tensor_name.replace("blk.", "layers.") tensor_name = tensor_name.replace(".attn_q.", ".attention.wq.") tensor_name = tensor_name.replace(".attn_k.", ".attention.wk.") tensor_name = tensor_name.replace(".attn_v.", ".attention.wv.") tensor_name = tensor_name.replace(".attn_output.", ".attention.wo.") tensor_name = tensor_name.replace(".ffn_gate.", ".feed_forward.w1.") tensor_name = tensor_name.replace(".ffn_down.", ".feed_forward.w2.") tensor_name = tensor_name.replace(".ffn_up.", ".feed_forward.w3.") tensor_name = tensor_name.replace(".attn_norm.", ".attention_norm.") return tensor_name def main(): if len(sys.argv) < 2: raise ValueError("missing weight file argument") filename = sys.argv[1] print(f"reading model file {filename}") if filename.endswith("gguf"): all_tensors, metadata = utils.load_gguf(filename) vocab = metadata["tokenizer.ggml.tokens"] for i, v in enumerate(vocab): vocab[i] = "\n" if v == "<0x0A>" else v.replace("▁", " ") hparams = {k: v for (k, v) in metadata.items() if not k.startswith("tokenizer")} print(hparams) hparams = { "n_vocab": len(vocab), "n_embd": metadata["llama.embedding_length"], "n_mult": 256, "n_head": metadata["llama.attention.head_count"], "n_head_kv": metadata["llama.attention.head_count_kv"], "n_layer": metadata["llama.block_count"], "n_rot": metadata["llama.rope.dimension_count"], "rope_freq": metadata.get("llama.rope.freq_base", 10000.0), "ftype": metadata["general.file_type"], "context_length": metadata["llama.context_length"], } all_tensors = {gguf_rename(k): v for k, v in all_tensors.items()} else: all_tensors, hparams, vocab = utils.load_ggml(filename) hparams["context_length"] = 2048 print(hparams) model = QuantizedLlama(hparams, all_tensors) print("model built, starting inference") tokens = [1] for token_idx in range(500): last_token = tokens[-1] lt = candle.tensor([last_token]).unsqueeze(0) logits = model.forward(lt, len(tokens)) # Greedy sampling for now # pr = candle.nn.softmax(logits, -1) m = logits.get(0).argmax_keepdim(-1) next_token = m.values()[0] print(vocab[next_token], end="", flush=True) tokens.append(next_token) if __name__ == "__main__": main()
candle/candle-pyo3/quant-llama.py/0
{ "file_path": "candle/candle-pyo3/quant-llama.py", "repo_id": "candle", "token_count": 1318 }
49
# candle-transformers
candle/candle-transformers/README.md/0
{ "file_path": "candle/candle-transformers/README.md", "repo_id": "candle", "token_count": 6 }
50
//! Falcon language model inference implementation //! //! See ["Falcon: a new approach to large language models"](https://huggingface.co/blog/falcon) //! //! Based on implementation from [Huggingface Transformers](https://github.com/huggingface/transformers/blob/main/src/transformers/models/falcon) use candle::{DType, Device, Result, Tensor, D}; use candle_nn::{embedding, linear_b as linear, Embedding, LayerNorm, Linear, Module, VarBuilder}; use serde::Deserialize; const MAX_SEQ_LEN: usize = 5000; fn layer_norm(size: usize, eps: f64, vb: VarBuilder) -> Result<LayerNorm> { let (weight, bias) = match (vb.get(size, "weight"), vb.get(size, "bias")) { (Ok(weight), Ok(bias)) => (weight, bias), (Err(err), _) | (_, Err(err)) => { if let (Ok(weight), Ok(bias)) = (vb.get(size, "gamma"), vb.get(size, "beta")) { (weight, bias) } else { return Err(err); } } }; Ok(LayerNorm::new(weight, bias, eps)) } // https://raw.githubusercontent.com/huggingface/transformers/030c863aaa0165e98352b61697430bf69bf33755/src/transformers/models/falcon/configuration_falcon.py #[derive(Clone, Debug, Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub layer_norm_epsilon: f64, pub initializer_range: f64, pub use_cache: bool, pub bos_token_id: u32, pub eos_token_id: u32, pub hidden_dropout: f64, pub attention_dropout: f64, pub n_head_kv: Option<usize>, pub alibi: bool, pub new_decoder_architecture: bool, pub multi_query: bool, pub parallel_attn: bool, pub bias: bool, } impl Default for Config { fn default() -> Self { Self { vocab_size: 65024, hidden_size: 4544, num_hidden_layers: 32, num_attention_heads: 71, layer_norm_epsilon: 1e-5, initializer_range: 0.02, use_cache: true, bos_token_id: 11, eos_token_id: 11, hidden_dropout: 0.0, attention_dropout: 0.0, n_head_kv: None, alibi: false, new_decoder_architecture: false, multi_query: true, parallel_attn: true, bias: false, } } } impl Config { pub fn validate(&self) -> Result<()> { if self.alibi { candle::bail!("alibi is not supported"); } if self.new_decoder_architecture { candle::bail!("new_decoder_architecture is not supported"); } if self.n_head_kv.is_some() { candle::bail!("n_head_kv is not supported"); } Ok(()) } // https://huggingface.co/tiiuae/falcon-7b/blob/main/config.json pub fn falcon7b() -> Self { // This is currently on par with the defaults, the defaults come from the Python default // arguments for the config initialization whereas the following come from the json config. Self { vocab_size: 65024, hidden_size: 4544, num_hidden_layers: 32, num_attention_heads: 71, layer_norm_epsilon: 1e-5, initializer_range: 0.02, use_cache: true, bos_token_id: 11, eos_token_id: 11, hidden_dropout: 0., attention_dropout: 0., n_head_kv: None, alibi: false, new_decoder_architecture: false, multi_query: true, parallel_attn: true, bias: false, } } fn head_dim(&self) -> usize { self.hidden_size / self.num_attention_heads } fn rotary(&self) -> bool { !self.alibi } } fn rotate_half(x: &Tensor) -> Result<Tensor> { let l = x.dim(D::Minus1)?; let x1 = x.narrow(D::Minus1, 0, l / 2)?; let x2 = x.narrow(D::Minus1, l / 2, l - l / 2)?; let x21 = Tensor::cat(&[&x2.neg()?, &x1], D::Minus1)?; Ok(x21) } #[derive(Debug, Clone)] struct FalconRotaryEmbedding { inv_freq: Tensor, cache: Option<(usize, Tensor, Tensor)>, } impl FalconRotaryEmbedding { fn load(device: &Device, cfg: &Config) -> Result<Self> { let head_dim = cfg.head_dim(); let inv_freq: Vec<_> = (0..head_dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / head_dim as f32)) .collect(); Ok(Self { inv_freq: Tensor::new(inv_freq.as_slice(), device)?, cache: None, }) } fn cos_sin( &mut self, seq_len: usize, device: &Device, dtype: DType, ) -> Result<(Tensor, Tensor)> { match &self.cache { Some((s, cos, sin)) if *s == seq_len => { return Ok((cos.clone(), sin.clone())); } _ => {} } let t = Tensor::arange(0, seq_len as u32, device)?.to_dtype(dtype)?; let inv_freq = self.inv_freq.to_dtype(dtype)?; let freqs = t.unsqueeze(1)?.matmul(&inv_freq.unsqueeze(0)?)?; let emb = Tensor::cat(&[&freqs, &freqs], D::Minus1)?; let cos = emb.cos()?; let sin = emb.sin()?; self.cache = Some((seq_len, cos.clone(), sin.clone())); Ok((cos, sin)) } fn forward( &mut self, query: &Tensor, key: &Tensor, past_kv_len: usize, ) -> Result<(Tensor, Tensor)> { let (_batch, seq_len, _head_dim) = query.dims3()?; let (cos, sin) = self.cos_sin(MAX_SEQ_LEN, query.device(), query.dtype())?; let cos = cos.narrow(0, past_kv_len, seq_len)?; let sin = sin.narrow(0, past_kv_len, seq_len)?; let qs = (query.broadcast_mul(&cos)? + &rotate_half(query)?.broadcast_mul(&sin)?)?; let ks = (key.broadcast_mul(&cos)? + &rotate_half(key)?.broadcast_mul(&sin)?)?; Ok((qs, ks)) } } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> { let shape = mask.shape(); let on_true = Tensor::new(on_true, on_false.device())? .to_dtype(on_false.dtype())? .broadcast_as(shape.dims())?; let m = mask.where_cond(&on_true, on_false)?; Ok(m) } #[derive(Debug, Clone)] struct FalconAttention { query_key_value: Linear, dense: Linear, maybe_rotary: Option<FalconRotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, inv_norm_factor: f64, multi_query: bool, use_cache: bool, num_heads: usize, head_dim: usize, n_head_kv: usize, } impl FalconAttention { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let maybe_rotary = if cfg.rotary() { let rotary = FalconRotaryEmbedding::load(vb.device(), cfg)?; Some(rotary) } else { None }; let head_dim = cfg.head_dim(); let hidden_size = cfg.hidden_size; let qkv_out_dim = if cfg.multi_query { hidden_size + 2 * head_dim } else { 3 * hidden_size }; let query_key_value = linear(hidden_size, qkv_out_dim, cfg.bias, vb.pp("query_key_value"))?; let dense = linear(hidden_size, hidden_size, cfg.bias, vb.pp("dense"))?; Ok(Self { query_key_value, dense, maybe_rotary, kv_cache: None, inv_norm_factor: 1. / (head_dim as f64).sqrt(), multi_query: cfg.multi_query, use_cache: cfg.use_cache, num_heads: cfg.num_attention_heads, n_head_kv: cfg.n_head_kv.unwrap_or(1), head_dim, }) } fn split_heads(&self, fused_qkv: &Tensor) -> Result<(Tensor, Tensor, Tensor)> { let (b_sz, seq_len, _) = fused_qkv.dims3()?; if !self.multi_query { let fused_qkv = fused_qkv.reshape((b_sz, seq_len, self.num_heads, 3, self.head_dim))?; let q = fused_qkv.narrow(D::Minus2, 0, 1)?.squeeze(D::Minus2)?; let k = fused_qkv.narrow(D::Minus2, 1, 1)?.squeeze(D::Minus2)?; let v = fused_qkv.narrow(D::Minus2, 2, 1)?.squeeze(D::Minus2)?; Ok((q, k, v)) } else { let fused_qkv = fused_qkv.reshape((b_sz, seq_len, self.num_heads + 2, self.head_dim))?; let d = fused_qkv.dim(D::Minus2)?; let q = fused_qkv.narrow(D::Minus2, 0, d - 2)?; let k = fused_qkv.narrow(D::Minus2, d - 2, 1)?; let v = fused_qkv.narrow(D::Minus2, d - 1, 1)?; Ok((q, k, v)) } } fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, past_kv_len: usize) -> Result<Tensor> { let fused_qkv = self.query_key_value.forward(x)?; let head_dim = self.head_dim; let (query, key, value) = self.split_heads(&fused_qkv)?; let (b_sz, seq_len, _, _) = query.dims4()?; let query = query .transpose(1, 2)? .reshape((b_sz * self.num_heads, seq_len, head_dim))?; let key = key .transpose(1, 2)? .reshape((b_sz * self.n_head_kv, seq_len, head_dim))?; let value = value .transpose(1, 2)? .reshape((b_sz * self.n_head_kv, seq_len, head_dim))?; let (query, key) = if let Some(r) = &mut self.maybe_rotary { r.forward(&query, &key, past_kv_len)? } else { (query, key) }; let (mut key, mut value) = (key, value); if self.use_cache { if let Some((cache_k, cache_v)) = &self.kv_cache { // TODO: we could trim the tensors to MAX_SEQ_LEN so that this would work for // arbitrarily large sizes. key = Tensor::cat(&[cache_k, &key], 1)?.contiguous()?; value = Tensor::cat(&[cache_v, &value], 1)?.contiguous()?; } self.kv_cache = Some((key.clone(), value.clone())) } let query = query.reshape((b_sz * self.num_heads, seq_len, head_dim))?; let all_len = past_kv_len + seq_len; let key = key.reshape((b_sz * self.n_head_kv, all_len, head_dim))?; let value = value.reshape((b_sz * self.n_head_kv, all_len, head_dim))?; let (key, value) = if self.n_head_kv == 1 { ( key.broadcast_as((b_sz * self.num_heads, all_len, head_dim))?, value.broadcast_as((b_sz * self.num_heads, all_len, head_dim))?, ) } else { (key, value) }; // Only handle the case where alibi is None here, and non-flash attention. let attention_scores = (query.matmul(&key.t()?)? * self.inv_norm_factor)?; let attention_scores = match mask { None => attention_scores, Some(mask) => { let mask = masked_fill(&mask.to_dtype(DType::F32)?, mask, -1e9)? .to_dtype(query.dtype())?; attention_scores.broadcast_add(&mask.squeeze(1)?)? } }; let attention_scores = candle_nn::ops::softmax(&attention_scores.to_dtype(DType::F32)?, D::Minus1)? .to_dtype(x.dtype())?; let attn_output = attention_scores .matmul(&value)? .reshape((b_sz, self.num_heads, seq_len, head_dim))? .transpose(1, 2)? .reshape((b_sz, seq_len, self.num_heads * head_dim))?; let attn_output = self.dense.forward(&attn_output)?; Ok(attn_output) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct FalconMlp { dense_h_to_4h: Linear, dense_4h_to_h: Linear, } impl FalconMlp { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let h = cfg.hidden_size; let b = cfg.bias; let dense_h_to_4h = linear(h, 4 * h, b, vb.pp("dense_h_to_4h"))?; let dense_4h_to_h = linear(4 * h, h, b, vb.pp("dense_4h_to_h"))?; Ok(Self { dense_h_to_4h, dense_4h_to_h, }) } fn forward(&self, x: &Tensor) -> Result<Tensor> { let x = self.dense_h_to_4h.forward(x)?.gelu()?; let x = self.dense_4h_to_h.forward(&x)?; Ok(x) } } #[derive(Debug, Clone)] struct FalconDecoderLayer { inp_layernorm: LayerNorm, self_attention: FalconAttention, post_attention_layernorm: Option<LayerNorm>, mlp: FalconMlp, parallel_attn: bool, } impl FalconDecoderLayer { fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> { let mlp = FalconMlp::load(vb.pp("mlp"), cfg)?; let inp_layernorm = layer_norm( cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("input_layernorm"), )?; let self_attention = FalconAttention::load(vb.pp("self_attention"), cfg)?; let post_attention_layernorm = if cfg.parallel_attn { None } else { let ln = layer_norm( cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("post_attention_layernorm"), )?; Some(ln) }; Ok(Self { inp_layernorm, self_attention, post_attention_layernorm, mlp, parallel_attn: cfg.parallel_attn, }) } fn forward(&mut self, x: &Tensor, mask: Option<&Tensor>, past_kv_len: usize) -> Result<Tensor> { let residual = x.clone(); let ln_attn = self.inp_layernorm.forward(x)?; let attn_output = self.self_attention.forward(&ln_attn, mask, past_kv_len)?; let (residual, ln_mlp) = match &self.post_attention_layernorm { None => (residual, ln_attn), Some(pal) => { // This should include some dropout. let residual = (&attn_output + &residual)?; let ln_mlp = pal.forward(&residual)?; (residual, ln_mlp) } }; let mlp_output = self.mlp.forward(&ln_mlp)?; let mlp_output = if self.parallel_attn { (mlp_output + attn_output)? } else { mlp_output }; let output = (mlp_output + residual)?; Ok(output) } pub fn clear_kv_cache(&mut self) { self.self_attention.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Falcon { word_embeddings: Embedding, blocks: Vec<FalconDecoderLayer>, ln_f: LayerNorm, lm_head: Linear, config: Config, } fn make_causal_mask(t: usize) -> Result<Tensor> { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), &Device::Cpu)?; Ok(mask) } fn prepare_attn_mask(b_sz: usize, seq_len: usize) -> Result<Tensor> { // let mask = Tensor::ones((b_sz, seq_len), DType::U32, &Device::Cpu)?; let mask = make_causal_mask(seq_len)?; let mask = mask.broadcast_as((b_sz, 1, seq_len, seq_len))?; Ok(mask) } impl Falcon { pub fn config(&self) -> &Config { &self.config } pub fn load(vb: VarBuilder, cfg: Config) -> Result<Self> { let word_embeddings = embedding( cfg.vocab_size, cfg.hidden_size, vb.pp("transformer.word_embeddings"), )?; let blocks = (0..cfg.num_hidden_layers) .map(|i| FalconDecoderLayer::load(vb.pp(format!("transformer.h.{i}")), &cfg)) .collect::<Result<Vec<_>>>()?; let ln_f = layer_norm( cfg.hidden_size, cfg.layer_norm_epsilon, vb.pp("transformer.ln_f"), )?; let lm_head = linear(cfg.hidden_size, cfg.vocab_size, false, vb.pp("lm_head"))?; Ok(Self { word_embeddings, blocks, ln_f, lm_head, config: cfg, }) } pub fn forward(&mut self, input_ids: &Tensor) -> Result<Tensor> { let (b_sz, seq_len) = input_ids.dims2()?; let mut hidden_state = self.word_embeddings.forward(input_ids)?; let past_kv_len = match &self.blocks[0].self_attention.kv_cache { Some((k, _)) => k.dim(1)?, None => 0, }; let causal_mask = if seq_len <= 1 { None } else { Some(prepare_attn_mask(b_sz, seq_len)?.to_device(input_ids.device())?) }; for block in self.blocks.iter_mut() { hidden_state = block.forward(&hidden_state, causal_mask.as_ref(), past_kv_len)?; } let hidden_state = self.ln_f.forward(&hidden_state)?; let hidden_state = hidden_state.narrow(1, seq_len - 1, 1)?; let logits = self.lm_head.forward(&hidden_state)?.squeeze(1)?; Ok(logits) } pub fn clear_kv_cache(&mut self) { for block in self.blocks.iter_mut() { block.clear_kv_cache() } } }
candle/candle-transformers/src/models/falcon.rs/0
{ "file_path": "candle/candle-transformers/src/models/falcon.rs", "repo_id": "candle", "token_count": 8880 }
51
//! MixFormer (Microsoft's Phi Architecture) //! //! See "Textbooks Are All You Need II: phi-1.5 technical report", Lin et al. 2023 //! - [Arxiv](https://arxiv.org/abs/2309.05463) //! - [Github](https://huggingface.co/microsoft/phi-1_5) //! use crate::models::with_tracing::{linear, Embedding as E, Linear}; /// MixFormer model. /// https://huggingface.co/microsoft/phi-1_5 /// https://arxiv.org/abs/2309.05463 use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use serde::Deserialize; const MAX_SEQ_LEN: usize = 4096; // https://huggingface.co/microsoft/phi-1_5/blob/d38e6f954ec29b96fe2cf033937dad64e279b5d9/configuration_mixformer_sequential.py #[derive(Debug, Clone, PartialEq, Deserialize)] pub struct Config { pub(crate) vocab_size: usize, pub(crate) n_positions: usize, pub(crate) n_embd: usize, pub(crate) n_layer: usize, pub(crate) n_inner: Option<usize>, pub(crate) n_head: usize, pub(crate) rotary_dim: usize, pub(crate) activation_function: Activation, pub(crate) layer_norm_epsilon: f64, pub(crate) tie_word_embeddings: bool, pub(crate) pad_vocab_size_multiple: usize, } impl Config { pub fn v1() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 1024, n_layer: 20, n_inner: None, n_head: 16, rotary_dim: usize::min(32, 1024 / 16), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } pub fn v1_5() -> Self { Self { vocab_size: 51200, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } pub fn v2() -> Self { Self { vocab_size: 51200, n_positions: 2048, n_embd: 2560, n_layer: 32, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2560 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } // https://huggingface.co/teknium/Puffin-Phi-v2/blob/main/config.json pub fn puffin_phi_v2() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::Gelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } // https://huggingface.co/teknium/Phi-Hermes-1.3B/blob/main/config.json pub fn phi_hermes_1_3b() -> Self { Self { vocab_size: 50304, n_positions: 2048, n_embd: 2048, n_layer: 24, n_inner: None, n_head: 32, rotary_dim: usize::min(32, 2048 / 32), activation_function: Activation::NewGelu, layer_norm_epsilon: 1e-5, tie_word_embeddings: false, pad_vocab_size_multiple: 64, } } } #[derive(Debug, Clone)] struct Embedding { wte: E, } impl Embedding { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let wte = E::new(cfg.vocab_size, cfg.n_embd, vb.pp("wte"))?; Ok(Self { wte }) } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self.wte.forward(xs) } } fn get_mask(size: usize, dtype: DType, device: &Device) -> Result<Tensor> { let mask: Vec<_> = (0..size) .flat_map(|i| (0..size).map(move |j| if j > i { f32::NEG_INFINITY } else { 0. })) .collect(); Tensor::from_slice(&mask, (size, size), device)?.to_dtype(dtype) } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dim: usize, max_seq_len: usize, dtype: DType, dev: &Device) -> Result<Self> { let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / 10000f32.powf(i as f32 / dim as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(DType::F32)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?.to_dtype(dtype)?, cos: freqs.cos()?.to_dtype(dtype)?, }) } fn apply_rotary_emb_qkv( &self, qkv: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor, Tensor)> { let (_b_size, seqlen, three, _, _headdim) = qkv.dims5()?; if three != 3 { candle::bail!("unexpected shape for qkv {:?}", qkv.shape()) } let (_rotary_seqlen, rotary_dim) = self.cos.dims2()?; let rotary_dim = rotary_dim * 2; let q_rot = qkv.i((.., .., 0, .., ..rotary_dim))?.contiguous()?; let q_pass = qkv.i((.., .., 0, .., rotary_dim..))?; let k_rot = qkv.i((.., .., 1, .., ..rotary_dim))?.contiguous()?; let k_pass = qkv.i((.., .., 1, .., rotary_dim..))?; let c = self.cos.narrow(0, seqlen_offset, seqlen)?; let s = self.sin.narrow(0, seqlen_offset, seqlen)?; let q_rot = candle_nn::rotary_emb::rope_thd(&q_rot, &c, &s)?; let k_rot = candle_nn::rotary_emb::rope_thd(&k_rot, &c, &s)?; let q = Tensor::cat(&[&q_rot, &q_pass], D::Minus1)?; let k = Tensor::cat(&[&k_rot, &k_pass], D::Minus1)?; let v = qkv.i((.., .., 2))?; Ok((q, k, v)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { fc1: Linear, fc2: Linear, act: Activation, span: tracing::Span, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let n_inner = cfg.n_inner.unwrap_or(4 * cfg.n_embd); let fc1 = linear(cfg.n_embd, n_inner, vb.pp("fc1"))?; let fc2 = linear(n_inner, cfg.n_embd, vb.pp("fc2"))?; Ok(Self { fc1, fc2, act: cfg.activation_function, span: tracing::span!(tracing::Level::TRACE, "mlp"), }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); xs.apply(&self.fc1)?.apply(&self.act)?.apply(&self.fc2) } } #[derive(Debug, Clone)] struct CausalLMHead { ln: candle_nn::LayerNorm, linear: Linear, } impl CausalLMHead { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln = candle_nn::layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?; let linear = linear(cfg.n_embd, cfg.vocab_size, vb.pp("linear"))?; Ok(Self { ln, linear }) } } impl Module for CausalLMHead { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.ln)? .apply(&self.linear)? .to_dtype(DType::F32) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MHA { wqkv: Linear, out_proj: Linear, rotary_emb: RotaryEmbedding, kv_cache: Option<(Tensor, Tensor)>, head_dim: usize, softmax_scale: f64, span: tracing::Span, span_rope: tracing::Span, span_mask: tracing::Span, span_softmax: tracing::Span, } impl MHA { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let head_dim = cfg.n_embd / cfg.n_head; let op_size = cfg.n_embd; let wqkv = linear(cfg.n_embd, 3 * op_size, vb.pp("Wqkv"))?; let out_proj = linear(op_size, cfg.n_embd, vb.pp("out_proj"))?; let rotary_emb = RotaryEmbedding::new(cfg.rotary_dim, MAX_SEQ_LEN, vb.dtype(), vb.device())?; let softmax_scale = 1f64 / (head_dim as f64).sqrt(); Ok(Self { wqkv, out_proj, head_dim, kv_cache: None, rotary_emb, softmax_scale, span: tracing::span!(tracing::Level::TRACE, "mha"), span_rope: tracing::span!(tracing::Level::TRACE, "rope"), span_mask: tracing::span!(tracing::Level::TRACE, "mask"), span_softmax: tracing::span!(tracing::Level::TRACE, "softmax"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (b_size, seq_len, _n_embd) = xs.dims3()?; let qkv = self .wqkv .forward(xs)? .reshape((b_size, seq_len, 3, (), self.head_dim))?; let seqlen_offset = match &self.kv_cache { None => 0, Some((prev_k, _)) => prev_k.dim(1)?, }; // In the python implementation, a single tensor is returned with the third axis of size 3. let (q, k, v) = { let _enter = self.span_rope.enter(); self.rotary_emb.apply_rotary_emb_qkv(&qkv, seqlen_offset)? }; let (k, v) = match &self.kv_cache { None => (k, v), Some((prev_k, prev_v)) => { let k = Tensor::cat(&[prev_k, &k], 1)?; let v = Tensor::cat(&[prev_v, &v], 1)?; (k, v) } }; self.kv_cache = Some((k.clone(), v.clone())); // scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale) let q = q.transpose(1, 2)?.flatten_to(1)?; // b*h, t, d let k = k.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d let v = v.transpose(1, 2)?.flatten_to(1)?; // b*h, s, d let attn_weights = (q.matmul(&k.t()?)? * self.softmax_scale)?; // b*h, t, s // causal_mask = torch.triu(torch.full((seqlen_q, seqlen_k), -10000.0, device=scores.device), 1) // scores = scores + causal_mask.to(dtype=scores.dtype) let attn_weights = match mask { None => attn_weights, Some(mask) => { let _enter = self.span_mask.enter(); attn_weights.broadcast_add(mask)? } }; let attn_weights = { let _enter = self.span_softmax.enter(); candle_nn::ops::softmax_last_dim(&attn_weights)? }; // output = torch.einsum('bhts,bshd->bthd', attention_drop, v) // attn_weights: b*h,t,s, v: b*h,s,d let attn_output = attn_weights.matmul(&v)?; // b*h,t,d let attn_output = attn_output .reshape((b_size, (), seq_len, self.head_dim))? .transpose(1, 2)? .flatten_from(D::Minus2)?; attn_output.apply(&self.out_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct ParallelBlock { ln: candle_nn::LayerNorm, mixer: MHA, mlp: MLP, span: tracing::Span, } impl ParallelBlock { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let ln = candle_nn::layer_norm(cfg.n_embd, cfg.layer_norm_epsilon, vb.pp("ln"))?; let mixer = MHA::new(cfg, vb.pp("mixer"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { ln, mixer, mlp, span: tracing::span!(tracing::Level::TRACE, "block"), }) } fn forward(&mut self, xs: &Tensor, mask: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let residual = xs; let xs = xs.apply(&self.ln)?; let attn_outputs = self.mixer.forward(&xs, mask)?; let feed_forward_hidden_states = self.mlp.forward(&xs)?; attn_outputs + feed_forward_hidden_states + residual } fn clear_kv_cache(&mut self) { self.mixer.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct MixFormerSequentialForCausalLM { embedding: Embedding, blocks: Vec<ParallelBlock>, head: CausalLMHead, span: tracing::Span, } impl MixFormerSequentialForCausalLM { pub fn new_v2(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_head = vb.pp("lm_head"); let vb = vb.pp("transformer"); let embedding = Embedding::new(cfg, vb.pp("embd"))?; let mut blocks = Vec::new(); for i in 0..cfg.n_layer { let block = ParallelBlock::new(cfg, vb.pp("h").pp(i))?; blocks.push(block) } let head = CausalLMHead::new(cfg, vb_head)?; Ok(Self { embedding, blocks, head, span: tracing::span!(tracing::Level::TRACE, "mixformer"), }) } pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("layers"); let embedding = Embedding::new(cfg, vb.pp(0))?; let mut blocks = Vec::new(); for i in 0..cfg.n_layer { let block = ParallelBlock::new(cfg, vb.pp(i + 1))?; blocks.push(block) } let head = CausalLMHead::new(cfg, vb.pp(cfg.n_layer + 1))?; Ok(Self { embedding, blocks, head, span: tracing::span!(tracing::Level::TRACE, "mixformer"), }) } pub fn forward(&mut self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let (_b_size, seq_len) = xs.dims2()?; let mut xs = xs.apply(&self.embedding)?; let mask = if seq_len <= 1 { None } else { Some(get_mask(seq_len, xs.dtype(), xs.device())?) }; for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())? } xs.narrow(1, seq_len - 1, 1)?.apply(&self.head)?.squeeze(1) } pub fn forward_with_img( &mut self, bos_token: &Tensor, xs: &Tensor, img_embeds: &Tensor, ) -> Result<Tensor> { let _enter = self.span.enter(); let xs = xs.apply(&self.embedding)?; let bos_token = bos_token.apply(&self.embedding)?; // Python implementation sequence order is <bos token embedding><img embedding><rest of text embedding> // https://github.com/vikhyat/moondream/blob/a9d788a20d1543fb1479edc54106e88cff7759d3/moondream/moondream.py#L43-L56 let mut xs = Tensor::cat(&[bos_token, img_embeds.clone(), xs], 1)?; let (_b_size, seq_len, _embds) = xs.dims3()?; let mask = Some(get_mask(seq_len, xs.dtype(), xs.device())?); for block in self.blocks.iter_mut() { xs = block.forward(&xs, mask.as_ref())? } let xs = xs .narrow(1, seq_len - 1, 1)? .apply(&self.head)? .squeeze(1)?; Ok(xs) } pub fn clear_kv_cache(&mut self) { self.blocks.iter_mut().for_each(|b| b.clear_kv_cache()) } }
candle/candle-transformers/src/models/mixformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/mixformer.rs", "repo_id": "candle", "token_count": 8060 }
52
use super::embedding::Model as EmbeddingModel; use crate::models::{ mistral::Config, with_tracing::{layer_norm, linear, linear_no_bias, LayerNorm, Linear}, }; use candle::{DType, Device, Result, Tensor, D}; use candle_nn::{ops::softmax_last_dim, LayerNormConfig, Module, VarBuilder}; // Geglu and feedforward from candle-transformers/src/models/stable_diffusion/attention.rs #[derive(Debug)] struct GeGlu { proj: Linear, span: tracing::Span, } impl GeGlu { fn new(vs: VarBuilder, dim_in: usize, dim_out: usize) -> Result<Self> { let proj = linear(dim_in, dim_out * 2, vs)?; let span = tracing::span!(tracing::Level::TRACE, "geglu"); Ok(Self { proj, span }) } } impl Module for GeGlu { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states_and_gate = self.proj.forward(xs)?.chunk(2, D::Minus1)?; &hidden_states_and_gate[0] * hidden_states_and_gate[1].gelu()? } } #[derive(Debug)] struct FeedForward { project_in: GeGlu, linear: Linear, span: tracing::Span, } impl FeedForward { fn new(vs: VarBuilder, dim: usize, dim_out: Option<usize>, mult: usize) -> Result<Self> { let inner_dim = dim * mult; let dim_out = dim_out.unwrap_or(dim); let vs = vs.pp("net"); let project_in = GeGlu::new(vs.pp("0"), dim, inner_dim)?; let linear = linear(inner_dim, dim_out, vs.pp("2"))?; let span = tracing::span!(tracing::Level::TRACE, "ff"); Ok(Self { project_in, linear, span, }) } } impl Module for FeedForward { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.project_in.forward(xs)?; self.linear.forward(&xs) } } // CrossAttention from candle-transformers/src/models/stable_diffusion/attention.rs #[derive(Debug)] struct CrossAttention { to_q: Linear, to_kv: Linear, to_out: Linear, heads: usize, scale: f64, span: tracing::Span, span_attn: tracing::Span, span_softmax: tracing::Span, } impl CrossAttention { fn new( vs: VarBuilder, query_dim: usize, context_dim: Option<usize>, heads: usize, dim_head: usize, ) -> Result<Self> { let inner_dim = dim_head * heads; let context_dim = context_dim.unwrap_or(query_dim); let scale = 1.0 / f64::sqrt(dim_head as f64); let to_q = linear_no_bias(query_dim, inner_dim, vs.pp("to_q"))?; let to_kv = linear_no_bias(context_dim, inner_dim * 2, vs.pp("to_kv"))?; let to_out = linear_no_bias(inner_dim, query_dim, vs.pp("to_out"))?; let span = tracing::span!(tracing::Level::TRACE, "xa"); let span_attn = tracing::span!(tracing::Level::TRACE, "xa-attn"); let span_softmax = tracing::span!(tracing::Level::TRACE, "xa-softmax"); Ok(Self { to_q, to_kv, to_out, heads, scale, span, span_attn, span_softmax, }) } fn reshape_heads_to_batch_dim(&self, xs: &Tensor) -> Result<Tensor> { let (batch_size, seq_len, dim) = xs.dims3()?; xs.reshape((batch_size, seq_len, self.heads, dim / self.heads))? .transpose(1, 2)? .reshape((batch_size * self.heads, seq_len, dim / self.heads)) } fn reshape_batch_dim_to_heads(&self, xs: &Tensor) -> Result<Tensor> { let (batch_size, seq_len, dim) = xs.dims3()?; xs.reshape((batch_size / self.heads, self.heads, seq_len, dim))? .transpose(1, 2)? .reshape((batch_size / self.heads, seq_len, dim * self.heads)) } fn attention(&self, query: &Tensor, key: &Tensor, value: &Tensor) -> Result<Tensor> { let _enter = self.span_attn.enter(); let in_dtype = query.dtype(); let query = query.to_dtype(DType::F32)?; let key = key.to_dtype(DType::F32)?; let value = value.to_dtype(DType::F32)?; let xs = query.matmul(&(key.t()? * self.scale)?)?; let xs = { let _enter = self.span_softmax.enter(); softmax_last_dim(&xs)? }; let xs = xs.matmul(&value)?.to_dtype(in_dtype)?; self.reshape_batch_dim_to_heads(&xs) } fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let query = self.to_q.forward(xs)?; let context = context.unwrap_or(xs).contiguous()?; let kv_chunks = self .to_kv .forward(&context)? .chunk(2, context.shape().dims().len() - 1)?; let (key, value) = (kv_chunks[0].clone(), kv_chunks[1].clone()); let query = self.reshape_heads_to_batch_dim(&query)?; let key = self.reshape_heads_to_batch_dim(&key)?; let value = self.reshape_heads_to_batch_dim(&value)?; let xs = self.attention(&query, &key, &value)?; self.to_out.forward(&xs) } } #[derive(Debug)] pub struct Model { embedding_model: EmbeddingModel, cross_attn: CrossAttention, cross_attn_norm: LayerNorm, cross_attn_context_norm: LayerNorm, ff: FeedForward, ff_norm: LayerNorm, latents: Tensor, pub device: Device, pub dtype: DType, } impl Model { pub fn new(vb: VarBuilder) -> Result<Self> { // Embedding model let cfg = Config::config_7b_v0_1(false); let embedding_model = EmbeddingModel::new(&cfg, vb.pp("embedding_model"))?; // Latent attention let dim = 4096; let vb = vb.pp("latent_attention_model"); let latents = vb.get((512, dim), "latents")?; // Cross attend blocks let vb = vb.pp("cross_attend_blocks"); let cross_attn_norm = layer_norm(dim, LayerNormConfig::default(), vb.pp("0.norm"))?; let cross_attn_context_norm = layer_norm( dim, candle_nn::LayerNormConfig::default(), vb.pp("0.norm_context"), )?; let cross_attn = CrossAttention::new(vb.pp("0.fn"), dim, None, 8, 4096)?; let ff_norm = layer_norm(dim, LayerNormConfig::default(), vb.pp("1.norm"))?; let ff = FeedForward::new(vb.pp("1.fn"), dim, None, 4)?; Ok(Self { embedding_model, cross_attn, cross_attn_norm, cross_attn_context_norm, ff, ff_norm, latents, device: vb.device().clone(), dtype: vb.dtype(), }) } pub fn forward( &mut self, input_ids: &Tensor, attn_mask: &Tensor, pool_mask: &Tensor, ) -> Result<Tensor> { // Embedding model let hiddens = self .embedding_model .forward(attn_mask, input_ids, self.dtype)?; // Latent attention let b = hiddens.dims()[0]; let x = self.latents.unsqueeze(0)?.repeat((b, 1, 1))?; let original_hiddens = &hiddens; let hiddens = self.cross_attn_norm.forward(original_hiddens)?; let x = self.cross_attn_context_norm.forward(&x)?; let cross_hiddens = (self.cross_attn.forward(&hiddens, Some(&x))? + original_hiddens)?; let hiddens = self.ff_norm.forward(&cross_hiddens)?; let hiddens = (self.ff.forward(&hiddens)? + cross_hiddens)?; // Mean pooling let hiddens_masked = hiddens.broadcast_mul(&pool_mask.unsqueeze(D::Minus1)?)?; let s = hiddens_masked.sum(1)?; let d = pool_mask.sum_keepdim(1)?; s.broadcast_div(&d) } }
candle/candle-transformers/src/models/nvembed_v2/model.rs/0
{ "file_path": "candle/candle-transformers/src/models/nvembed_v2/model.rs", "repo_id": "candle", "token_count": 3730 }
53
//! Quantized llama model implementation. //! //! This provides a quantized implementation of the llama language model architecture. //! The model implements parameter efficient quantization for reduced memory usage //! while maintaining model quality. //! //! Key characteristics: //! - Transformer decoder architecture //! - Support for 2/3/4/8-bit quantization //! - Optimized memory usage through quantization //! - Configurable model sizes and parameter counts //! //! - 💻 [GH Link](https://github.com/facebookresearch/llama) //! - 📝 [Paper](https://arxiv.org/abs/2302.13971) //! //! ![](https://raw.githubusercontent.com/huggingface/candle/main/candle-examples/examples/quantized/assets/aoc.gif) //! use std::collections::HashMap; use crate::quantized_nn::RmsNorm; use candle::quantized::QTensor; use candle::quantized::{ggml_file, gguf_file}; use candle::{DType, Device, IndexOp, Result, Tensor}; use candle_nn::{Embedding, Module}; pub const MAX_SEQ_LEN: usize = 4096; // QMatMul wrapper adding some tracing. #[derive(Debug, Clone)] struct QMatMul { inner: candle::quantized::QMatMul, span: tracing::Span, } impl QMatMul { fn from_qtensor(qtensor: QTensor) -> Result<Self> { let inner = candle::quantized::QMatMul::from_qtensor(qtensor)?; let span = tracing::span!(tracing::Level::TRACE, "qmatmul"); Ok(Self { inner, span }) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } #[derive(Debug, Clone)] struct Mlp { feed_forward_w1: QMatMul, feed_forward_w2: QMatMul, feed_forward_w3: QMatMul, } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let w1 = self.feed_forward_w1.forward(xs)?; let w3 = self.feed_forward_w3.forward(xs)?; self.feed_forward_w2 .forward(&(candle_nn::ops::silu(&w1)? * w3)?) } } #[derive(Debug, Clone)] enum MlpOrMoe { Mlp(Mlp), MoE { n_expert_used: usize, feed_forward_gate_inp: QMatMul, experts: Vec<Mlp>, }, } impl Module for MlpOrMoe { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Self::MoE { feed_forward_gate_inp, experts, n_expert_used, } => { let (b_size, seq_len, hidden_dim) = xs.dims3()?; let xs = xs.reshape(((), hidden_dim))?; let router_logits = feed_forward_gate_inp.forward(&xs)?; let routing_weights = candle_nn::ops::softmax_last_dim(&router_logits)?; // In order to extract topk, we extract the data from the tensor and manipulate it // directly. Maybe we will want to use some custom ops instead at some point. let routing_weights = routing_weights.to_dtype(DType::F32)?.to_vec2::<f32>()?; // routing_weights, selected_experts = torch.topk(routing_weights, self.top_k, dim=-1) // top_x contains the row indexes to evaluate for each expert. let mut top_x = vec![vec![]; experts.len()]; let mut selected_rws = vec![vec![]; experts.len()]; for (row_idx, rw) in routing_weights.iter().enumerate() { let mut dst = (0..rw.len() as u32).collect::<Vec<u32>>(); dst.sort_by(|&i, &j| rw[j as usize].total_cmp(&rw[i as usize])); let mut sum_routing_weights = 0f32; for &expert_idx in dst.iter().take(*n_expert_used) { let expert_idx = expert_idx as usize; let routing_weight = rw[expert_idx]; sum_routing_weights += routing_weight; top_x[expert_idx].push(row_idx as u32); } for &expert_idx in dst.iter().take(*n_expert_used) { let expert_idx = expert_idx as usize; let routing_weight = rw[expert_idx]; selected_rws[expert_idx].push(routing_weight / sum_routing_weights) } } // routing_weights /= routing_weights.sum(dim=-1, keepdim=True) // expert_mask = torch.nn.functional.one_hot(selected_experts, num_classes=self.num_experts).permute(2, 1, 0) let mut ys = xs.zeros_like()?; for (expert_idx, expert_layer) in experts.iter().enumerate() { let top_x = &top_x[expert_idx]; if top_x.is_empty() { continue; } let top_x = Tensor::new(top_x.as_slice(), xs.device())?; let selected_rws = Tensor::new(selected_rws[expert_idx].as_slice(), xs.device())? .reshape(((), 1))?; // Index the correct hidden states and compute the expert hidden state for // the current expert. We need to make sure to multiply the output hidden // states by `routing_weights` on the corresponding tokens (top-1 and top-2) let current_state = xs.index_select(&top_x, 0)?.reshape(((), hidden_dim))?; // current_hidden_states = expert_layer(current_state, routing_weights[top_x_list, idx_list, None]) let current_hidden_states = expert_layer.forward(&current_state)?; let current_hidden_states = current_hidden_states.broadcast_mul(&selected_rws)?; ys = ys.index_add(&top_x, &current_hidden_states, 0)?; } let ys = ys.reshape((b_size, seq_len, hidden_dim))?; Ok(ys) } Self::Mlp(mlp) => mlp.forward(xs), } } } #[derive(Debug, Clone)] struct LayerWeights { attention_wq: QMatMul, attention_wk: QMatMul, attention_wv: QMatMul, attention_wo: QMatMul, attention_norm: RmsNorm, mlp_or_moe: MlpOrMoe, ffn_norm: RmsNorm, n_head: usize, n_kv_head: usize, head_dim: usize, cos: Tensor, sin: Tensor, neg_inf: Tensor, kv_cache: Option<(Tensor, Tensor)>, span_attn: tracing::Span, span_rot: tracing::Span, span_mlp: tracing::Span, } fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: &Tensor) -> Result<Tensor> { let shape = mask.shape(); let m = mask.where_cond(&on_true.broadcast_as(shape.dims())?, on_false)?; Ok(m) } impl LayerWeights { fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let _enter = self.span_rot.enter(); let (_b_sz, _n_head, seq_len, _n_embd) = x.dims4()?; let cos = self.cos.narrow(0, index_pos, seq_len)?; let sin = self.sin.narrow(0, index_pos, seq_len)?; // The call to contiguous below is only necessary when processing the prompt. // When the seq_len is 1 in the inference loop, this is a no-op. candle_nn::rotary_emb::rope_i(&x.contiguous()?, &cos, &sin) } fn forward_attn( &mut self, x: &Tensor, mask: Option<&Tensor>, index_pos: usize, ) -> Result<Tensor> { let _enter = self.span_attn.enter(); let (b_sz, seq_len, n_embd) = x.dims3()?; let q = self.attention_wq.forward(x)?; let k = self.attention_wk.forward(x)?; let v = self.attention_wv.forward(x)?; let q = q .reshape((b_sz, seq_len, self.n_head, self.head_dim))? .transpose(1, 2)?; let k = k .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))? .transpose(1, 2)?; let v = v .reshape((b_sz, seq_len, self.n_kv_head, self.head_dim))? .transpose(1, 2)? // This call to contiguous ensures that the fast kernel can be called below. It's // actually a no-op except when processing the initial prompt so has no significant // impact on performance. .contiguous()?; let q = self.apply_rotary_emb(&q, index_pos)?; let k = self.apply_rotary_emb(&k, index_pos)?; let (k, v) = match &self.kv_cache { None => (k, v), Some((k_cache, v_cache)) => { if index_pos == 0 { (k, v) } else { let k = Tensor::cat(&[k_cache, &k], 2)?; let v = Tensor::cat(&[v_cache, &v], 2)?; (k, v) } } }; self.kv_cache = Some((k.clone(), v.clone())); let y = if q.device().is_metal() && seq_len == 1 { // SDPA will do MQA for us candle_nn::ops::sdpa(&q, &k, &v, 1. / (self.head_dim as f32).sqrt(), 1.)? } else { // Support for MQA, useful for 70B models and mistral. let k = crate::utils::repeat_kv(k, self.n_head / self.n_kv_head)?; let v = crate::utils::repeat_kv(v, self.n_head / self.n_kv_head)?; let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?; let att = match mask { None => att, Some(mask) => { let mask = mask.broadcast_as(att.shape())?; masked_fill(&att, &mask, &self.neg_inf)? } }; let att = candle_nn::ops::softmax_last_dim(&att)?; // Convert to contiguous as matmul doesn't support strided vs for now. att.matmul(&v.contiguous()?)? }; let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?; let y = self.attention_wo.forward(&y)?; Ok(y) } } #[derive(Debug, Clone)] pub struct ModelWeights { tok_embeddings: Embedding, layers: Vec<LayerWeights>, norm: RmsNorm, output: QMatMul, masks: HashMap<usize, Tensor>, span: tracing::Span, span_output: tracing::Span, } fn precomput_freqs_cis( head_dim: usize, freq_base: f32, device: &Device, ) -> Result<(Tensor, Tensor)> { let theta: Vec<_> = (0..head_dim) .step_by(2) .map(|i| 1f32 / freq_base.powf(i as f32 / head_dim as f32)) .collect(); let theta = Tensor::new(theta.as_slice(), device)?; let idx_theta = Tensor::arange(0, MAX_SEQ_LEN as u32, device)? .to_dtype(DType::F32)? .reshape((MAX_SEQ_LEN, 1))? .matmul(&theta.reshape((1, theta.elem_count()))?)?; let cos = idx_theta.cos()?; let sin = idx_theta.sin()?; Ok((cos, sin)) } impl ModelWeights { pub fn from_ggml(mut ct: ggml_file::Content, gqa: usize) -> Result<Self> { let head_dim = (ct.hparams.n_embd / ct.hparams.n_head) as usize; let (cos, sin) = precomput_freqs_cis(head_dim, 10000., &ct.device)?; let neg_inf = Tensor::new(f32::NEG_INFINITY, &ct.device)?; let tok_embeddings = ct.remove("tok_embeddings.weight")?; let tok_embeddings = tok_embeddings.dequantize(&ct.device)?; let norm = RmsNorm::from_qtensor(ct.remove("norm.weight")?, 1e-5)?; let output = ct.remove("output.weight")?; let mut layers = Vec::with_capacity(ct.hparams.n_layer as usize); for layer_idx in 0..ct.hparams.n_layer { let prefix = format!("layers.{layer_idx}"); let attention_wq = ct.remove(&format!("{prefix}.attention.wq.weight"))?; let attention_wk = ct.remove(&format!("{prefix}.attention.wk.weight"))?; let attention_wv = ct.remove(&format!("{prefix}.attention.wv.weight"))?; let attention_wo = ct.remove(&format!("{prefix}.attention.wo.weight"))?; let mlp_or_moe = { let feed_forward_w1 = ct.remove(&format!("{prefix}.feed_forward.w1.weight"))?; let feed_forward_w2 = ct.remove(&format!("{prefix}.feed_forward.w2.weight"))?; let feed_forward_w3 = ct.remove(&format!("{prefix}.feed_forward.w3.weight"))?; MlpOrMoe::Mlp(Mlp { feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?, feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?, feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?, }) }; let attention_norm = ct.remove(&format!("{prefix}.attention_norm.weight"))?; let ffn_norm = ct.remove(&format!("{prefix}.ffn_norm.weight"))?; let span_attn = tracing::span!(tracing::Level::TRACE, "attn"); let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot"); let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp"); layers.push(LayerWeights { attention_wq: QMatMul::from_qtensor(attention_wq)?, attention_wk: QMatMul::from_qtensor(attention_wk)?, attention_wv: QMatMul::from_qtensor(attention_wv)?, attention_wo: QMatMul::from_qtensor(attention_wo)?, attention_norm: RmsNorm::from_qtensor(attention_norm, 1e-5)?, mlp_or_moe, ffn_norm: RmsNorm::from_qtensor(ffn_norm, 1e-5)?, n_head: ct.hparams.n_head as usize, n_kv_head: ct.hparams.n_head as usize / gqa, head_dim: (ct.hparams.n_embd / ct.hparams.n_head) as usize, cos: cos.clone(), sin: sin.clone(), neg_inf: neg_inf.clone(), kv_cache: None, span_attn, span_rot, span_mlp, }) } let span = tracing::span!(tracing::Level::TRACE, "model"); let span_output = tracing::span!(tracing::Level::TRACE, "output"); Ok(Self { tok_embeddings: Embedding::new(tok_embeddings, ct.hparams.n_embd as usize), layers, norm, output: QMatMul::from_qtensor(output)?, masks: HashMap::new(), span, span_output, }) } pub fn from_gguf<R: std::io::Seek + std::io::Read>( ct: gguf_file::Content, reader: &mut R, device: &Device, ) -> Result<Self> { let md_get = |s: &str| match ct.metadata.get(s) { None => candle::bail!("cannot find {s} in metadata"), Some(v) => Ok(v), }; // Parameter extraction from metadata. let n_expert = md_get("llama.expert_count") .and_then(|v| v.to_u32()) .unwrap_or(0) as usize; let n_expert_used = md_get("llama.expert_used_count") .and_then(|v| v.to_u32()) .unwrap_or(0) as usize; let head_count = md_get("llama.attention.head_count")?.to_u32()? as usize; let head_count_kv = md_get("llama.attention.head_count_kv")?.to_u32()? as usize; let block_count = md_get("llama.block_count")?.to_u32()? as usize; let embedding_length = md_get("llama.embedding_length")?.to_u32()? as usize; let rope_dim = md_get("llama.rope.dimension_count")?.to_u32()? as usize; // Strangely this value is generally 1e-6 in GGUF file but used to be 1e-5 by default. let rms_norm_eps = md_get("llama.attention.layer_norm_rms_epsilon")?.to_f32()? as f64; let rope_freq_base = md_get("llama.rope.freq_base") .and_then(|m| m.to_f32()) .unwrap_or(10000f32); let (cos, sin) = precomput_freqs_cis(rope_dim, rope_freq_base, device)?; let neg_inf = Tensor::new(f32::NEG_INFINITY, device)?; let tok_embeddings_q = ct.tensor(reader, "token_embd.weight", device)?; let tok_embeddings = tok_embeddings_q.dequantize(device)?; let norm = RmsNorm::from_qtensor( ct.tensor(reader, "output_norm.weight", device)?, rms_norm_eps, )?; let output = match ct.tensor(reader, "output.weight", device) { Ok(tensor) => tensor, Err(_) => tok_embeddings_q, }; let mut layers = Vec::with_capacity(block_count); for layer_idx in 0..block_count { let prefix = format!("blk.{layer_idx}"); let attention_wq = ct.tensor(reader, &format!("{prefix}.attn_q.weight"), device)?; let attention_wk = ct.tensor(reader, &format!("{prefix}.attn_k.weight"), device)?; let attention_wv = ct.tensor(reader, &format!("{prefix}.attn_v.weight"), device)?; let attention_wo = ct.tensor(reader, &format!("{prefix}.attn_output.weight"), device)?; let mlp_or_moe = if n_expert <= 1 { let feed_forward_w1 = ct.tensor(reader, &format!("{prefix}.ffn_gate.weight"), device)?; let feed_forward_w2 = ct.tensor(reader, &format!("{prefix}.ffn_down.weight"), device)?; let feed_forward_w3 = ct.tensor(reader, &format!("{prefix}.ffn_up.weight"), device)?; MlpOrMoe::Mlp(Mlp { feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?, feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?, feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?, }) } else { let feed_forward_gate_inp = ct.tensor(reader, &format!("{prefix}.ffn_gate_inp.weight"), device)?; let mut experts = Vec::with_capacity(n_expert); for i in 0..n_expert { let feed_forward_w1 = ct.tensor(reader, &format!("{prefix}.ffn_gate.{i}.weight"), device)?; let feed_forward_w2 = ct.tensor(reader, &format!("{prefix}.ffn_down.{i}.weight"), device)?; let feed_forward_w3 = ct.tensor(reader, &format!("{prefix}.ffn_up.{i}.weight"), device)?; experts.push(Mlp { feed_forward_w1: QMatMul::from_qtensor(feed_forward_w1)?, feed_forward_w2: QMatMul::from_qtensor(feed_forward_w2)?, feed_forward_w3: QMatMul::from_qtensor(feed_forward_w3)?, }) } MlpOrMoe::MoE { n_expert_used, feed_forward_gate_inp: QMatMul::from_qtensor(feed_forward_gate_inp)?, experts, } }; let attention_norm = ct.tensor(reader, &format!("{prefix}.attn_norm.weight"), device)?; let ffn_norm = ct.tensor(reader, &format!("{prefix}.ffn_norm.weight"), device)?; let span_attn = tracing::span!(tracing::Level::TRACE, "attn"); let span_rot = tracing::span!(tracing::Level::TRACE, "attn-rot"); let span_mlp = tracing::span!(tracing::Level::TRACE, "attn-mlp"); layers.push(LayerWeights { attention_wq: QMatMul::from_qtensor(attention_wq)?, attention_wk: QMatMul::from_qtensor(attention_wk)?, attention_wv: QMatMul::from_qtensor(attention_wv)?, attention_wo: QMatMul::from_qtensor(attention_wo)?, attention_norm: RmsNorm::from_qtensor(attention_norm, rms_norm_eps)?, mlp_or_moe, ffn_norm: RmsNorm::from_qtensor(ffn_norm, rms_norm_eps)?, n_head: head_count, n_kv_head: head_count_kv, head_dim: embedding_length / head_count, cos: cos.clone(), sin: sin.clone(), neg_inf: neg_inf.clone(), kv_cache: None, span_attn, span_rot, span_mlp, }) } let span = tracing::span!(tracing::Level::TRACE, "model"); let span_output = tracing::span!(tracing::Level::TRACE, "output"); Ok(Self { tok_embeddings: Embedding::new(tok_embeddings, embedding_length), layers, norm, output: QMatMul::from_qtensor(output)?, masks: HashMap::new(), span, span_output, }) } fn mask(&mut self, t: usize, device: &Device) -> Result<Tensor> { if let Some(mask) = self.masks.get(&t) { Ok(mask.clone()) } else { let mask: Vec<_> = (0..t) .flat_map(|i| (0..t).map(move |j| u8::from(j > i))) .collect(); let mask = Tensor::from_slice(&mask, (t, t), device)?; self.masks.insert(t, mask.clone()); Ok(mask) } } pub fn forward(&mut self, x: &Tensor, index_pos: usize) -> Result<Tensor> { let (_b_sz, seq_len) = x.dims2()?; let mask = if seq_len == 1 { None } else { Some(self.mask(seq_len, x.device())?) }; let _enter = self.span.enter(); let mut layer_in = self.tok_embeddings.forward(x)?; for layer in self.layers.iter_mut() { let x = layer_in; let residual = &x; let x = layer.attention_norm.forward(&x)?; let attn = layer.forward_attn(&x, mask.as_ref(), index_pos)?; let x = (attn + residual)?; // MLP let _enter = layer.span_mlp.enter(); let residual = &x; let x = layer.ffn_norm.forward(&x)?; let x = layer.mlp_or_moe.forward(&x)?; let x = (x + residual)?; layer_in = x } let x = self.norm.forward(&layer_in)?; let x = x.i((.., seq_len - 1, ..))?; let _enter = self.span_output.enter(); self.output.forward(&x) } }
candle/candle-transformers/src/models/quantized_llama.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_llama.rs", "repo_id": "candle", "token_count": 11486 }
54
//! Qwen2 model implementation with quantization support. //! //! Qwen2 is a large language model from Alibaba optimized for efficiency. //! This implementation provides quantization for reduced memory and compute. //! //! Key characteristics: //! - Streaming decode support //! - Grouped query attention (GQA) //! - RMSNorm for layer normalization //! - Rotary positional embeddings (RoPE) //! - Support for 8-bit quantization //! //! References: //! - 🤗 [Qwen2 Model](https://huggingface.co/Qwen/Qwen2-7B) //! use crate::models::with_tracing::{linear, linear_no_bias, Linear, RmsNorm}; use candle::{DType, Device, IndexOp, Module, Result, Tensor, D}; use candle_nn::{Activation, VarBuilder}; use std::sync::Arc; #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct Config { pub vocab_size: usize, pub hidden_size: usize, pub intermediate_size: usize, pub num_hidden_layers: usize, pub num_attention_heads: usize, pub num_key_value_heads: usize, pub max_position_embeddings: usize, pub sliding_window: usize, pub max_window_layers: usize, pub tie_word_embeddings: bool, pub rope_theta: f64, pub rms_norm_eps: f64, pub use_sliding_window: bool, pub hidden_act: Activation, } #[derive(Debug, Clone)] struct RotaryEmbedding { sin: Tensor, cos: Tensor, } impl RotaryEmbedding { fn new(dtype: DType, cfg: &Config, dev: &Device) -> Result<Self> { let dim = cfg.hidden_size / cfg.num_attention_heads; let max_seq_len = cfg.max_position_embeddings; let inv_freq: Vec<_> = (0..dim) .step_by(2) .map(|i| 1f32 / cfg.rope_theta.powf(i as f64 / dim as f64) as f32) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?.to_dtype(dtype)?; let t = Tensor::arange(0u32, max_seq_len as u32, dev)? .to_dtype(dtype)? .reshape((max_seq_len, 1))?; let freqs = t.matmul(&inv_freq)?; Ok(Self { sin: freqs.sin()?, cos: freqs.cos()?, }) } fn apply_rotary_emb_qkv( &self, q: &Tensor, k: &Tensor, seqlen_offset: usize, ) -> Result<(Tensor, Tensor)> { let (_b_sz, _h, seq_len, _n_embd) = q.dims4()?; let cos = self.cos.narrow(0, seqlen_offset, seq_len)?; let sin = self.sin.narrow(0, seqlen_offset, seq_len)?; let q_embed = candle_nn::rotary_emb::rope(&q.contiguous()?, &cos, &sin)?; let k_embed = candle_nn::rotary_emb::rope(&k.contiguous()?, &cos, &sin)?; Ok((q_embed, k_embed)) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { gate_proj: Linear, up_proj: Linear, down_proj: Linear, act_fn: Activation, } impl MLP { fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let intermediate_sz = cfg.intermediate_size; let gate_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("gate_proj"))?; let up_proj = linear_no_bias(hidden_sz, intermediate_sz, vb.pp("up_proj"))?; let down_proj = linear_no_bias(intermediate_sz, hidden_sz, vb.pp("down_proj"))?; Ok(Self { gate_proj, up_proj, down_proj, act_fn: cfg.hidden_act, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let lhs = xs.apply(&self.gate_proj)?.apply(&self.act_fn)?; let rhs = xs.apply(&self.up_proj)?; (lhs * rhs)?.apply(&self.down_proj) } } #[derive(Debug, Clone)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, o_proj: Linear, num_heads: usize, num_kv_heads: usize, num_kv_groups: usize, head_dim: usize, hidden_size: usize, rotary_emb: Arc<RotaryEmbedding>, kv_cache: Option<(Tensor, Tensor)>, } impl Attention { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let hidden_sz = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let num_kv_heads = cfg.num_key_value_heads; let num_kv_groups = num_heads / num_kv_heads; let head_dim = hidden_sz / num_heads; let q_proj = linear(hidden_sz, num_heads * head_dim, vb.pp("q_proj"))?; let k_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("k_proj"))?; let v_proj = linear(hidden_sz, num_kv_heads * head_dim, vb.pp("v_proj"))?; let o_proj = linear_no_bias(num_heads * head_dim, hidden_sz, vb.pp("o_proj"))?; Ok(Self { q_proj, k_proj, v_proj, o_proj, num_heads, num_kv_heads, num_kv_groups, head_dim, hidden_size: hidden_sz, rotary_emb, kv_cache: None, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let (b_sz, q_len, _) = xs.dims3()?; let query_states = self.q_proj.forward(xs)?; let key_states = self.k_proj.forward(xs)?; let value_states = self.v_proj.forward(xs)?; let query_states = query_states .reshape((b_sz, q_len, self.num_heads, self.head_dim))? .transpose(1, 2)?; let key_states = key_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let value_states = value_states .reshape((b_sz, q_len, self.num_kv_heads, self.head_dim))? .transpose(1, 2)?; let (query_states, key_states) = self.rotary_emb .apply_rotary_emb_qkv(&query_states, &key_states, seqlen_offset)?; let (key_states, value_states) = match &self.kv_cache { None => (key_states, value_states), Some((prev_k, prev_v)) => { let key_states = Tensor::cat(&[prev_k, &key_states], 2)?; let value_states = Tensor::cat(&[prev_v, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some((key_states.clone(), value_states.clone())); let key_states = crate::utils::repeat_kv(key_states, self.num_kv_groups)?.contiguous()?; let value_states = crate::utils::repeat_kv(value_states, self.num_kv_groups)?.contiguous()?; let attn_output = { let scale = 1f64 / f64::sqrt(self.head_dim as f64); let attn_weights = (query_states.matmul(&key_states.transpose(2, 3)?)? * scale)?; let attn_weights = match attention_mask { None => attn_weights, Some(mask) => attn_weights.broadcast_add(mask)?, }; let attn_weights = candle_nn::ops::softmax_last_dim(&attn_weights)?; attn_weights.matmul(&value_states)? }; attn_output .transpose(1, 2)? .reshape((b_sz, q_len, self.hidden_size))? .apply(&self.o_proj) } fn clear_kv_cache(&mut self) { self.kv_cache = None } } #[derive(Debug, Clone)] struct DecoderLayer { self_attn: Attention, mlp: MLP, input_layernorm: RmsNorm, post_attention_layernorm: RmsNorm, } impl DecoderLayer { fn new(rotary_emb: Arc<RotaryEmbedding>, cfg: &Config, vb: VarBuilder) -> Result<Self> { let self_attn = Attention::new(rotary_emb, cfg, vb.pp("self_attn"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; let input_layernorm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb.pp("input_layernorm"))?; let post_attention_layernorm = RmsNorm::new( cfg.hidden_size, cfg.rms_norm_eps, vb.pp("post_attention_layernorm"), )?; Ok(Self { self_attn, mlp, input_layernorm, post_attention_layernorm, }) } fn forward( &mut self, xs: &Tensor, attention_mask: Option<&Tensor>, seqlen_offset: usize, ) -> Result<Tensor> { let residual = xs; let xs = self.input_layernorm.forward(xs)?; let xs = self.self_attn.forward(&xs, attention_mask, seqlen_offset)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.post_attention_layernorm)?.apply(&self.mlp)?; residual + xs } fn clear_kv_cache(&mut self) { self.self_attn.clear_kv_cache() } } #[derive(Debug, Clone)] pub struct Model { embed_tokens: candle_nn::Embedding, layers: Vec<DecoderLayer>, norm: RmsNorm, sliding_window: usize, device: Device, dtype: DType, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_m = vb.pp("model"); let embed_tokens = candle_nn::embedding(cfg.vocab_size, cfg.hidden_size, vb_m.pp("embed_tokens"))?; let rotary_emb = Arc::new(RotaryEmbedding::new(vb.dtype(), cfg, vb_m.device())?); let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb_l = vb_m.pp("layers"); for layer_idx in 0..cfg.num_hidden_layers { let layer = DecoderLayer::new(rotary_emb.clone(), cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm = RmsNorm::new(cfg.hidden_size, cfg.rms_norm_eps, vb_m.pp("norm"))?; Ok(Self { embed_tokens, layers, norm, sliding_window: cfg.sliding_window, device: vb.device().clone(), dtype: vb.dtype(), }) } fn prepare_causal_attention_mask( &self, b_size: usize, tgt_len: usize, seqlen_offset: usize, ) -> Result<Tensor> { // Sliding window mask? let mask: Vec<_> = (0..tgt_len) .flat_map(|i| { (0..tgt_len).map(move |j| { if i < j || j + self.sliding_window < i { f32::NEG_INFINITY } else { 0. } }) }) .collect(); let mask = Tensor::from_slice(&mask, (tgt_len, tgt_len), &self.device)?; let mask = if seqlen_offset > 0 { let mask0 = Tensor::zeros((tgt_len, seqlen_offset), self.dtype, &self.device)?; Tensor::cat(&[&mask0, &mask], D::Minus1)? } else { mask }; mask.expand((b_size, 1, tgt_len, tgt_len + seqlen_offset))? .to_dtype(self.dtype) } fn prepare_attention_mask(&self, attn_mask: &Tensor) -> Result<Tensor> { let (b_sz, sql_len) = attn_mask.dims2()?; let mut mask: Vec<Tensor> = vec![]; for b in 0..b_sz { mask.push(attn_mask.i((b, ..))?.expand((1, 1, sql_len, sql_len))?); } let mask = Tensor::cat(&mask, 0)?; let on_true = mask.zeros_like()?.to_dtype(self.dtype)?; let on_false = Tensor::new(f32::NEG_INFINITY, &self.device)? .broadcast_as(mask.shape())? .to_dtype(self.dtype)?; mask.where_cond(&on_true, &on_false) } pub fn forward( &mut self, input_ids: &Tensor, seqlen_offset: usize, attn_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_size, seq_len) = input_ids.dims2()?; let attention_mask: Option<Tensor> = match attn_mask { Some(mask) => Some(self.prepare_attention_mask(mask)?), None => { if seq_len <= 1 { None } else { Some(self.prepare_causal_attention_mask(b_size, seq_len, seqlen_offset)?) } } }; let mut xs = self.embed_tokens.forward(input_ids)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attention_mask.as_ref(), seqlen_offset)? } xs.apply(&self.norm) } pub fn clear_kv_cache(&mut self) { for layer in self.layers.iter_mut() { layer.clear_kv_cache() } } } #[derive(Debug, Clone)] pub struct ModelForCausalLM { base_model: Model, lm_head: Linear, } impl ModelForCausalLM { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let base_model = Model::new(cfg, vb.clone())?; let lm_head = if vb.contains_tensor("lm_head.weight") { linear_no_bias(cfg.hidden_size, cfg.vocab_size, vb.pp("lm_head"))? } else { Linear::from_weights(base_model.embed_tokens.embeddings().clone(), None) }; Ok(Self { base_model, lm_head, }) } pub fn forward(&mut self, input_ids: &Tensor, seqlen_offset: usize) -> Result<Tensor> { let (_b_size, seq_len) = input_ids.dims2()?; self.base_model .forward(input_ids, seqlen_offset, None)? .narrow(1, seq_len - 1, 1)? .apply(&self.lm_head) } pub fn clear_kv_cache(&mut self) { self.base_model.clear_kv_cache() } }
candle/candle-transformers/src/models/qwen2.rs/0
{ "file_path": "candle/candle-transformers/src/models/qwen2.rs", "repo_id": "candle", "token_count": 6864 }
55
use candle::{Result, Tensor}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; #[derive(Debug)] struct Attention { q_proj: Linear, k_proj: Linear, v_proj: Linear, out_proj: Linear, num_heads: usize, } impl Attention { fn new( embedding_dim: usize, num_heads: usize, downsample_rate: usize, vb: VarBuilder, ) -> Result<Self> { let internal_dim = embedding_dim / downsample_rate; let q_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("q_proj"))?; let k_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("k_proj"))?; let v_proj = candle_nn::linear(embedding_dim, internal_dim, vb.pp("v_proj"))?; let out_proj = candle_nn::linear(internal_dim, embedding_dim, vb.pp("out_proj"))?; Ok(Self { q_proj, k_proj, v_proj, out_proj, num_heads, }) } fn separate_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n, c) = x.dims3()?; x.reshape((b, n, self.num_heads, c / self.num_heads))? .transpose(1, 2)? .contiguous() } fn recombine_heads(&self, x: &Tensor) -> Result<Tensor> { let (b, n_heads, n_tokens, c_per_head) = x.dims4()?; x.transpose(1, 2)? .reshape((b, n_tokens, n_heads * c_per_head)) } fn forward(&self, q: &Tensor, k: &Tensor, v: &Tensor) -> Result<Tensor> { let q = self.q_proj.forward(&q.contiguous()?)?; let k = self.k_proj.forward(&k.contiguous()?)?; let v = self.v_proj.forward(&v.contiguous()?)?; let q = self.separate_heads(&q)?; let k = self.separate_heads(&k)?; let v = self.separate_heads(&v)?; let (_, _, _, c_per_head) = q.dims4()?; let attn = (q.matmul(&k.t()?)? / (c_per_head as f64).sqrt())?; let attn = candle_nn::ops::softmax_last_dim(&attn)?; let out = attn.matmul(&v)?; self.recombine_heads(&out)?.apply(&self.out_proj) } } #[derive(Debug)] struct TwoWayAttentionBlock { self_attn: Attention, norm1: LayerNorm, cross_attn_token_to_image: Attention, norm2: LayerNorm, mlp: super::MlpBlock, norm3: LayerNorm, norm4: LayerNorm, cross_attn_image_to_token: Attention, skip_first_layer_pe: bool, } impl TwoWayAttentionBlock { fn new( embedding_dim: usize, num_heads: usize, mlp_dim: usize, skip_first_layer_pe: bool, vb: VarBuilder, ) -> Result<Self> { let norm1 = layer_norm(embedding_dim, 1e-5, vb.pp("norm1"))?; let norm2 = layer_norm(embedding_dim, 1e-5, vb.pp("norm2"))?; let norm3 = layer_norm(embedding_dim, 1e-5, vb.pp("norm3"))?; let norm4 = layer_norm(embedding_dim, 1e-5, vb.pp("norm4"))?; let self_attn = Attention::new(embedding_dim, num_heads, 1, vb.pp("self_attn"))?; let cross_attn_token_to_image = Attention::new( embedding_dim, num_heads, 2, vb.pp("cross_attn_token_to_image"), )?; let cross_attn_image_to_token = Attention::new( embedding_dim, num_heads, 2, vb.pp("cross_attn_image_to_token"), )?; let mlp = super::MlpBlock::new( embedding_dim, mlp_dim, candle_nn::Activation::Relu, vb.pp("mlp"), )?; Ok(Self { self_attn, norm1, cross_attn_image_to_token, norm2, mlp, norm3, norm4, cross_attn_token_to_image, skip_first_layer_pe, }) } fn forward( &self, queries: &Tensor, keys: &Tensor, query_pe: &Tensor, key_pe: &Tensor, ) -> Result<(Tensor, Tensor)> { // Self attention block let queries = if self.skip_first_layer_pe { self.self_attn.forward(queries, queries, queries)? } else { let q = (queries + query_pe)?; let attn_out = self.self_attn.forward(&q, &q, queries)?; (queries + attn_out)? }; let queries = self.norm1.forward(&queries)?; // Cross attention block, tokens attending to image embedding let q = (&queries + query_pe)?; let k = (keys + key_pe)?; let attn_out = self.cross_attn_token_to_image.forward(&q, &k, keys)?; let queries = (&queries + attn_out)?; let queries = self.norm2.forward(&queries)?; // MLP block let mlp_out = self.mlp.forward(&queries); let queries = (queries + mlp_out)?; let queries = self.norm3.forward(&queries)?; // Cross attention block, image embedding attending to tokens let q = (&queries + query_pe)?; let k = (keys + key_pe)?; let attn_out = self.cross_attn_image_to_token.forward(&k, &q, &queries)?; let keys = (keys + attn_out)?; let keys = self.norm4.forward(&keys)?; Ok((queries, keys)) } } #[derive(Debug)] pub struct TwoWayTransformer { layers: Vec<TwoWayAttentionBlock>, final_attn_token_to_image: Attention, norm_final_attn: LayerNorm, } impl TwoWayTransformer { pub fn new( depth: usize, embedding_dim: usize, num_heads: usize, mlp_dim: usize, vb: VarBuilder, ) -> Result<Self> { let vb_l = vb.pp("layers"); let mut layers = Vec::with_capacity(depth); for i in 0..depth { let layer = TwoWayAttentionBlock::new(embedding_dim, num_heads, mlp_dim, i == 0, vb_l.pp(i))?; layers.push(layer) } let final_attn_token_to_image = Attention::new( embedding_dim, num_heads, 2, vb.pp("final_attn_token_to_image"), )?; let norm_final_attn = layer_norm(embedding_dim, 1e-5, vb.pp("norm_final_attn"))?; Ok(Self { layers, final_attn_token_to_image, norm_final_attn, }) } pub fn forward( &self, image_embedding: &Tensor, image_pe: &Tensor, point_embedding: &Tensor, ) -> Result<(Tensor, Tensor)> { let image_embedding = image_embedding.flatten_from(2)?.permute((0, 2, 1))?; let image_pe = image_pe.flatten_from(2)?.permute((0, 2, 1))?; let mut queries = point_embedding.clone(); let mut keys = image_embedding; for layer in self.layers.iter() { (queries, keys) = layer.forward(&queries, &keys, point_embedding, &image_pe)? } let q = (&queries + point_embedding)?; let k = (&keys + image_pe)?; let attn_out = self.final_attn_token_to_image.forward(&q, &k, &keys)?; let queries = (queries + attn_out)?.apply(&self.norm_final_attn)?; Ok((queries, keys)) } }
candle/candle-transformers/src/models/segment_anything/transformer.rs/0
{ "file_path": "candle/candle-transformers/src/models/segment_anything/transformer.rs", "repo_id": "candle", "token_count": 3597 }
56
#![allow(dead_code)] //! # Variational Auto-Encoder (VAE) Models. //! //! Auto-encoder models compress their input to a usually smaller latent space //! before expanding it back to its original shape. This results in the latent values //! compressing the original information. use super::unet_2d_blocks::{ DownEncoderBlock2D, DownEncoderBlock2DConfig, UNetMidBlock2D, UNetMidBlock2DConfig, UpDecoderBlock2D, UpDecoderBlock2DConfig, }; use candle::{Result, Tensor}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug, Clone)] struct EncoderConfig { // down_block_types: DownEncoderBlock2D block_out_channels: Vec<usize>, layers_per_block: usize, norm_num_groups: usize, double_z: bool, } impl Default for EncoderConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 2, norm_num_groups: 32, double_z: true, } } } #[derive(Debug)] struct Encoder { conv_in: nn::Conv2d, down_blocks: Vec<DownEncoderBlock2D>, mid_block: UNetMidBlock2D, conv_norm_out: nn::GroupNorm, conv_out: nn::Conv2d, #[allow(dead_code)] config: EncoderConfig, } impl Encoder { fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: EncoderConfig, ) -> Result<Self> { let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = nn::conv2d( in_channels, config.block_out_channels[0], 3, conv_cfg, vs.pp("conv_in"), )?; let mut down_blocks = vec![]; let vs_down_blocks = vs.pp("down_blocks"); for index in 0..config.block_out_channels.len() { let out_channels = config.block_out_channels[index]; let in_channels = if index > 0 { config.block_out_channels[index - 1] } else { config.block_out_channels[0] }; let is_final = index + 1 == config.block_out_channels.len(); let cfg = DownEncoderBlock2DConfig { num_layers: config.layers_per_block, resnet_eps: 1e-6, resnet_groups: config.norm_num_groups, add_downsample: !is_final, downsample_padding: 0, ..Default::default() }; let down_block = DownEncoderBlock2D::new( vs_down_blocks.pp(index.to_string()), in_channels, out_channels, cfg, )?; down_blocks.push(down_block) } let last_block_out_channels = *config.block_out_channels.last().unwrap(); let mid_cfg = UNetMidBlock2DConfig { resnet_eps: 1e-6, output_scale_factor: 1., attn_num_head_channels: None, resnet_groups: Some(config.norm_num_groups), ..Default::default() }; let mid_block = UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?; let conv_norm_out = nn::group_norm( config.norm_num_groups, last_block_out_channels, 1e-6, vs.pp("conv_norm_out"), )?; let conv_out_channels = if config.double_z { 2 * out_channels } else { out_channels }; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_out = nn::conv2d( last_block_out_channels, conv_out_channels, 3, conv_cfg, vs.pp("conv_out"), )?; Ok(Self { conv_in, down_blocks, mid_block, conv_norm_out, conv_out, config, }) } } impl Encoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = xs.apply(&self.conv_in)?; for down_block in self.down_blocks.iter() { xs = xs.apply(down_block)? } let xs = self .mid_block .forward(&xs, None)? .apply(&self.conv_norm_out)?; nn::ops::silu(&xs)?.apply(&self.conv_out) } } #[derive(Debug, Clone)] struct DecoderConfig { // up_block_types: UpDecoderBlock2D block_out_channels: Vec<usize>, layers_per_block: usize, norm_num_groups: usize, } impl Default for DecoderConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 2, norm_num_groups: 32, } } } #[derive(Debug)] struct Decoder { conv_in: nn::Conv2d, up_blocks: Vec<UpDecoderBlock2D>, mid_block: UNetMidBlock2D, conv_norm_out: nn::GroupNorm, conv_out: nn::Conv2d, #[allow(dead_code)] config: DecoderConfig, } impl Decoder { fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: DecoderConfig, ) -> Result<Self> { let n_block_out_channels = config.block_out_channels.len(); let last_block_out_channels = *config.block_out_channels.last().unwrap(); let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_in = nn::conv2d( in_channels, last_block_out_channels, 3, conv_cfg, vs.pp("conv_in"), )?; let mid_cfg = UNetMidBlock2DConfig { resnet_eps: 1e-6, output_scale_factor: 1., attn_num_head_channels: None, resnet_groups: Some(config.norm_num_groups), ..Default::default() }; let mid_block = UNetMidBlock2D::new(vs.pp("mid_block"), last_block_out_channels, None, mid_cfg)?; let mut up_blocks = vec![]; let vs_up_blocks = vs.pp("up_blocks"); let reversed_block_out_channels: Vec<_> = config.block_out_channels.iter().copied().rev().collect(); for index in 0..n_block_out_channels { let out_channels = reversed_block_out_channels[index]; let in_channels = if index > 0 { reversed_block_out_channels[index - 1] } else { reversed_block_out_channels[0] }; let is_final = index + 1 == n_block_out_channels; let cfg = UpDecoderBlock2DConfig { num_layers: config.layers_per_block + 1, resnet_eps: 1e-6, resnet_groups: config.norm_num_groups, add_upsample: !is_final, ..Default::default() }; let up_block = UpDecoderBlock2D::new( vs_up_blocks.pp(index.to_string()), in_channels, out_channels, cfg, )?; up_blocks.push(up_block) } let conv_norm_out = nn::group_norm( config.norm_num_groups, config.block_out_channels[0], 1e-6, vs.pp("conv_norm_out"), )?; let conv_cfg = nn::Conv2dConfig { padding: 1, ..Default::default() }; let conv_out = nn::conv2d( config.block_out_channels[0], out_channels, 3, conv_cfg, vs.pp("conv_out"), )?; Ok(Self { conv_in, up_blocks, mid_block, conv_norm_out, conv_out, config, }) } } impl Decoder { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.mid_block.forward(&self.conv_in.forward(xs)?, None)?; for up_block in self.up_blocks.iter() { xs = up_block.forward(&xs)? } let xs = self.conv_norm_out.forward(&xs)?; let xs = nn::ops::silu(&xs)?; self.conv_out.forward(&xs) } } #[derive(Debug, Clone)] pub struct AutoEncoderKLConfig { pub block_out_channels: Vec<usize>, pub layers_per_block: usize, pub latent_channels: usize, pub norm_num_groups: usize, pub use_quant_conv: bool, pub use_post_quant_conv: bool, } impl Default for AutoEncoderKLConfig { fn default() -> Self { Self { block_out_channels: vec![64], layers_per_block: 1, latent_channels: 4, norm_num_groups: 32, use_quant_conv: true, use_post_quant_conv: true, } } } pub struct DiagonalGaussianDistribution { mean: Tensor, std: Tensor, } impl DiagonalGaussianDistribution { pub fn new(parameters: &Tensor) -> Result<Self> { let mut parameters = parameters.chunk(2, 1)?.into_iter(); let mean = parameters.next().unwrap(); let logvar = parameters.next().unwrap(); let std = (logvar * 0.5)?.exp()?; Ok(DiagonalGaussianDistribution { mean, std }) } pub fn sample(&self) -> Result<Tensor> { let sample = self.mean.randn_like(0., 1.); &self.mean + &self.std * sample } } // https://github.com/huggingface/diffusers/blob/970e30606c2944e3286f56e8eb6d3dc6d1eb85f7/src/diffusers/models/vae.py#L485 // This implementation is specific to the config used in stable-diffusion-v1-5 // https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/vae/config.json #[derive(Debug)] pub struct AutoEncoderKL { encoder: Encoder, decoder: Decoder, quant_conv: Option<nn::Conv2d>, post_quant_conv: Option<nn::Conv2d>, pub config: AutoEncoderKLConfig, } impl AutoEncoderKL { pub fn new( vs: nn::VarBuilder, in_channels: usize, out_channels: usize, config: AutoEncoderKLConfig, ) -> Result<Self> { let latent_channels = config.latent_channels; let encoder_cfg = EncoderConfig { block_out_channels: config.block_out_channels.clone(), layers_per_block: config.layers_per_block, norm_num_groups: config.norm_num_groups, double_z: true, }; let encoder = Encoder::new(vs.pp("encoder"), in_channels, latent_channels, encoder_cfg)?; let decoder_cfg = DecoderConfig { block_out_channels: config.block_out_channels.clone(), layers_per_block: config.layers_per_block, norm_num_groups: config.norm_num_groups, }; let decoder = Decoder::new(vs.pp("decoder"), latent_channels, out_channels, decoder_cfg)?; let conv_cfg = Default::default(); let quant_conv = { if config.use_quant_conv { Some(nn::conv2d( 2 * latent_channels, 2 * latent_channels, 1, conv_cfg, vs.pp("quant_conv"), )?) } else { None } }; let post_quant_conv = { if config.use_post_quant_conv { Some(nn::conv2d( latent_channels, latent_channels, 1, conv_cfg, vs.pp("post_quant_conv"), )?) } else { None } }; Ok(Self { encoder, decoder, quant_conv, post_quant_conv, config, }) } /// Returns the distribution in the latent space. pub fn encode(&self, xs: &Tensor) -> Result<DiagonalGaussianDistribution> { let xs = self.encoder.forward(xs)?; let parameters = match &self.quant_conv { None => xs, Some(quant_conv) => quant_conv.forward(&xs)?, }; DiagonalGaussianDistribution::new(&parameters) } /// Takes as input some sampled values. pub fn decode(&self, xs: &Tensor) -> Result<Tensor> { let xs = match &self.post_quant_conv { None => xs, Some(post_quant_conv) => &post_quant_conv.forward(xs)?, }; self.decoder.forward(xs) } }
candle/candle-transformers/src/models/stable_diffusion/vae.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/vae.rs", "repo_id": "candle", "token_count": 6467 }
57
use candle::{Module, Result, Tensor}; use candle_nn::VarBuilder; #[derive(Debug, Clone)] pub struct Embedding { inner: candle_nn::Embedding, span: tracing::Span, } impl Embedding { pub fn new(d1: usize, d2: usize, vb: VarBuilder) -> Result<Self> { let inner = candle_nn::embedding(d1, d2, vb)?; let span = tracing::span!(tracing::Level::TRACE, "embedding"); Ok(Self { inner, span }) } pub fn from_weights(weights: Tensor) -> Result<Self> { let (_in_size, out_size) = weights.dims2()?; let inner = candle_nn::Embedding::new(weights, out_size); let span = tracing::span!(tracing::Level::TRACE, "embedding"); Ok(Self { inner, span }) } pub fn embeddings(&self) -> &Tensor { self.inner.embeddings() } } impl Module for Embedding { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } #[derive(Debug, Clone)] pub struct Linear { inner: candle_nn::Linear, span: tracing::Span, } impl Linear { pub fn from_weights(weights: Tensor, bias: Option<Tensor>) -> Self { let inner = candle_nn::Linear::new(weights, bias); let span = tracing::span!(tracing::Level::TRACE, "linear"); Self { inner, span } } } pub fn linear_b(d1: usize, d2: usize, b: bool, vb: VarBuilder) -> Result<Linear> { let inner = candle_nn::linear_b(d1, d2, b, vb)?; let span = tracing::span!(tracing::Level::TRACE, "linear"); Ok(Linear { inner, span }) } pub fn linear(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> { let inner = candle_nn::linear(d1, d2, vb)?; let span = tracing::span!(tracing::Level::TRACE, "linear"); Ok(Linear { inner, span }) } pub fn linear_no_bias(d1: usize, d2: usize, vb: VarBuilder) -> Result<Linear> { let inner = candle_nn::linear_no_bias(d1, d2, vb)?; let span = tracing::span!(tracing::Level::TRACE, "linear"); Ok(Linear { inner, span }) } impl Module for Linear { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } // Wrap the conv2d op to provide some tracing. #[derive(Debug, Clone)] pub struct Conv2d { inner: candle_nn::Conv2d, span: tracing::Span, } impl Module for Conv2d { fn forward(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(x) } } pub fn conv2d( in_channels: usize, out_channels: usize, kernel_size: usize, cfg: candle_nn::Conv2dConfig, vs: candle_nn::VarBuilder, ) -> Result<Conv2d> { let span = tracing::span!(tracing::Level::TRACE, "conv2d"); let inner = candle_nn::conv2d(in_channels, out_channels, kernel_size, cfg, vs)?; Ok(Conv2d { inner, span }) } // QMatMul wrapper adding some tracing. #[derive(Clone)] pub struct QMatMul { inner: candle::quantized::QMatMul, span: tracing::Span, } impl QMatMul { pub fn new( out_dim: usize, in_dim: usize, vb: crate::quantized_var_builder::VarBuilder, ) -> Result<Self> { let ws = vb.get((in_dim, out_dim), "weight")?; let inner = candle::quantized::QMatMul::from_arc(ws)?; let span = tracing::span!(tracing::Level::TRACE, "qmatmul"); Ok(Self { inner, span }) } pub fn from_weights(ws: std::sync::Arc<candle::quantized::QTensor>) -> Result<Self> { let inner = candle::quantized::QMatMul::from_arc(ws)?; let span = tracing::span!(tracing::Level::TRACE, "qmatmul"); Ok(Self { inner, span }) } } impl Module for QMatMul { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } impl std::fmt::Debug for QMatMul { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "QMatMul") } } #[derive(Clone, Debug)] pub struct LayerNorm { inner: candle_nn::LayerNorm, span: tracing::Span, } impl LayerNorm { pub fn new(weight: Tensor, bias: Tensor, eps: f64) -> Self { let inner = candle_nn::LayerNorm::new(weight, bias, eps); let span = tracing::span!(tracing::Level::TRACE, "layer-norm"); Self { inner, span } } } impl Module for LayerNorm { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(xs) } } pub fn layer_norm<C: Into<candle_nn::LayerNormConfig>>( size: usize, c: C, vb: VarBuilder, ) -> Result<LayerNorm> { let inner = candle_nn::layer_norm(size, c, vb)?; let span = tracing::span!(tracing::Level::TRACE, "layer-norm"); Ok(LayerNorm { inner, span }) } #[derive(Debug, Clone)] pub struct RmsNorm { inner: candle_nn::RmsNorm, span: tracing::Span, } impl RmsNorm { pub fn new(size: usize, eps: f64, vb: VarBuilder) -> Result<Self> { let span = tracing::span!(tracing::Level::TRACE, "rms-norm"); let inner = candle_nn::rms_norm(size, eps, vb)?; Ok(Self { inner, span }) } pub fn forward_diff(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward_diff(x) } } impl Module for RmsNorm { fn forward(&self, x: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); self.inner.forward(x) } }
candle/candle-transformers/src/models/with_tracing.rs/0
{ "file_path": "candle/candle-transformers/src/models/with_tracing.rs", "repo_id": "candle", "token_count": 2381 }
58
use candle::{Device, Result, Tensor}; use candle_transformers::generation::LogitsProcessor; #[test] fn sample_with_zero_temperature() -> Result<()> { let mut logits_process = LogitsProcessor::new(1337, None, None); let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?; let token = logits_process.sample(&logits)?; assert_eq!(token, 3); Ok(()) } #[test] fn sample_with_temperature() -> Result<()> { let mut logits_process = LogitsProcessor::new(42, Some(0.9), None); let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?; let token = logits_process.sample(&logits)?; assert_eq!(token, 0); Ok(()) } #[test] fn sample_with_top_p() -> Result<()> { let mut logits_process = LogitsProcessor::new(42, Some(1.0), Some(0.5)); let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?; let token = logits_process.sample(&logits)?; assert_eq!(token, 2); Ok(()) } #[test] fn sample_with_top_k() -> Result<()> { let mut logits_process = LogitsProcessor::from_sampling( 42, candle_transformers::generation::Sampling::TopK { k: 1, temperature: 1.0, }, ); let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?; let token = logits_process.sample(&logits)?; assert_eq!(token, 3); let mut logits_process = LogitsProcessor::from_sampling( 42, candle_transformers::generation::Sampling::TopK { k: 2, temperature: 1.0, }, ); let logits = Tensor::new(&[0.1, 0.2, 0.3, 0.4], &Device::Cpu)?; let token = logits_process.sample(&logits)?; assert_eq!(token, 3); let token = logits_process.sample(&logits)?; assert_eq!(token, 2); Ok(()) } #[test] fn sample_gumbel() -> Result<()> { let mut logits_process = LogitsProcessor::from_sampling( 42, candle_transformers::generation::Sampling::GumbelSoftmax { temperature: 1.0 }, ); let logits = Tensor::new(&[-1.0, 0.0, 0.2, 1.0], &Device::Cpu)?; let sm = candle_nn::ops::softmax(&logits, 0)?.to_vec1::<f64>()?; let mut counts = vec![0f64; 4]; let samples = 100000; for _ in 0..samples { let token = logits_process.sample(&logits)?; counts[token as usize] += 1f64 / samples as f64; } for i in 0..4 { if (counts[i] - sm[i]).abs() > 0.05 { panic!("pr mismatch {counts:?} {sm:?}"); } } Ok(()) }
candle/candle-transformers/tests/generation_tests.rs/0
{ "file_path": "candle/candle-transformers/tests/generation_tests.rs", "repo_id": "candle", "token_count": 1145 }
59
## Running Whisper Examples Here, we provide two examples of how to run Whisper using a Candle-compiled WASM binary and runtimes. ### Pure Rust UI To build and test the UI made in Rust you will need [Trunk](https://trunkrs.dev/#install) From the `candle-wasm-examples/whisper` directory run: Download assets: ```bash # mel filters wget -c https://huggingface.co/spaces/lmz/candle-whisper/resolve/main/mel_filters.safetensors # Model and tokenizer tiny.en wget -c https://huggingface.co/openai/whisper-tiny.en/resolve/main/model.safetensors -P whisper-tiny.en wget -c https://huggingface.co/openai/whisper-tiny.en/raw/main/tokenizer.json -P whisper-tiny.en wget -c https://huggingface.co/openai/whisper-tiny.en/raw/main/config.json -P whisper-tiny.en # model and tokenizer tiny multilanguage wget -c https://huggingface.co/openai/whisper-tiny/resolve/main/model.safetensors -P whisper-tiny wget -c https://huggingface.co/openai/whisper-tiny/raw/main/tokenizer.json -P whisper-tiny wget -c https://huggingface.co/openai/whisper-tiny/raw/main/config.json -P whisper-tiny #quantized wget -c https://huggingface.co/lmz/candle-whisper/resolve/main/model-tiny-en-q80.gguf -P quantized wget -c https://huggingface.co/lmz/candle-whisper/raw/main/tokenizer-tiny-en.json -P quantized wget -c https://huggingface.co/lmz/candle-whisper/raw/main/config-tiny-en.json -P quantized # Audio samples wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_gb0.wav -P audios wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_a13.wav -P audios wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_gb1.wav -P audios wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_hp0.wav -P audios wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_jfk.wav -P audios wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_mm0.wav -P audios ``` Run hot reload server: ```bash trunk serve --release --public-url / --port 8080 ``` ### Vanilla JS and WebWorkers To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library: ```bash sh build-lib.sh ``` This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module: ```js import init, { Decoder } from "./build/m.js"; ``` The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example: ```bash python -m http.server ``` Then open `http://localhost:8000/lib-example.html` in your browser.
candle/candle-wasm-examples/whisper/README.md/0
{ "file_path": "candle/candle-wasm-examples/whisper/README.md", "repo_id": "candle", "token_count": 1023 }
60
{ "moz:firefoxOptions": { "prefs": { "media.navigator.streams.fake": true, "media.navigator.permission.disabled": true }, "args": [] }, "goog:chromeOptions": { "args": [ "--use-fake-device-for-media-stream", "--use-fake-ui-for-media-stream" ] } }
candle/candle-wasm-tests/webdriver.json/0
{ "file_path": "candle/candle-wasm-tests/webdriver.json", "repo_id": "candle", "token_count": 143 }
61
{{- define "name" -}} {{- default $.Release.Name | trunc 63 | trimSuffix "-" -}} {{- end -}} {{- define "app.name" -}} chat-ui {{- end -}} {{- define "labels.standard" -}} release: {{ $.Release.Name | quote }} heritage: {{ $.Release.Service | quote }} chart: "{{ include "name" . }}" app: "{{ include "app.name" . }}" {{- end -}} {{- define "labels.resolver" -}} release: {{ $.Release.Name | quote }} heritage: {{ $.Release.Service | quote }} chart: "{{ include "name" . }}" app: "{{ include "app.name" . }}-resolver" {{- end -}}
chat-ui/chart/templates/_helpers.tpl/0
{ "file_path": "chat-ui/chart/templates/_helpers.tpl", "repo_id": "chat-ui", "token_count": 202 }
62
# Multimodal We currently support [IDEFICS](https://huggingface.co/blog/idefics) (hosted on [TGI](./providers/tgi)), OpenAI and Anthropic Claude 3 as multimodal models. You can enable it by setting `multimodal: true` in your `MODELS` configuration. For IDEFICS, you must have a [PRO HF Api token](https://huggingface.co/settings/tokens). For OpenAI, see the [OpenAI section](./providers/openai). For Anthropic, see the [Anthropic section](./providers/anthropic). ```ini MODELS=`[ { "name": "HuggingFaceM4/idefics-80b-instruct", "multimodal" : true, "description": "IDEFICS is the new multimodal model by Hugging Face.", "preprompt": "", "chatPromptTemplate" : "{{#each messages}}{{#ifUser}}User: {{content}}{{/ifUser}}<end_of_utterance>\nAssistant: {{#ifAssistant}}{{content}}\n{{/ifAssistant}}{{/each}}", "parameters": { "temperature": 0.1, "top_p": 0.95, "repetition_penalty": 1.2, "top_k": 12, "truncate": 1000, "max_new_tokens": 1024, "stop": ["<end_of_utterance>", "User:", "\nUser:"] } } ]` ```
chat-ui/docs/source/configuration/models/multimodal.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/multimodal.md", "repo_id": "chat-ui", "token_count": 439 }
63
# Web Search Chat UI features a powerful Web Search feature. A high level overview of how it works: 1. Generate an appropriate search query from the user prompt using the `TASK_MODEL` 2. Perform web search via an external provider (i.e. Serper) or via locally scrape Google results 3. Load each search result into playwright and scrape 4. Convert scraped HTML to Markdown tree with headings as parents 5. Create embeddings for each Markdown element 6. Find the embeddings closest to the user query using a vector similarity search (inner product) 7. Get the corresponding Markdown elements and their parent, up to 8000 characters 8. Supply the information as context to the model <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/websearch-light.png" height="auto"/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/websearch-dark.png" height="auto"/> </div> ## Providers Many providers are supported for the web search, or you can use locally scraped Google results. ### Local For locally scraped Google results, put `USE_LOCAL_WEBSEARCH=true` in your `.env.local`. Please note that you may hit rate limits as we make no attempt to make the traffic look legitimate. To avoid this, you may choose a provider, such as Serper, used on the official instance. ### SearXNG > SearXNG is a free internet metasearch engine which aggregates results from various search services and databases. Users are neither tracked nor profiled. You may enable support via the `SEARXNG_QUERY_URL` where `<query>` will be replaced with the query keywords. Please see [the official documentation](https://docs.searxng.org/dev/search_api.html) for more information Example: `https://searxng.yourdomain.com/search?q=<query>&engines=duckduckgo,google&format=json` ### Third Party Many third party providers are supported as well. The official instance uses Serper. ```ini YDC_API_KEY=docs.you.com api key here SERPER_API_KEY=serper.dev api key here SERPAPI_KEY=serpapi key here SERPSTACK_API_KEY=serpstack api key here SEARCHAPI_KEY=searchapi api key here ``` ## Block/Allow List You may block or allow specific websites from the web search results. When using an allow list, only the links in the allowlist will be used. For supported search engines, the links will be blocked from the results directly. Any URL in the results that **partially or fully matches** the entry will be filtered out. ```ini WEBSEARCH_BLOCKLIST=`["youtube.com", "https://example.com/foo/bar"]` WEBSEARCH_ALLOWLIST=`["stackoverflow.com"]` ``` ## Disabling Javascript By default, Playwright will execute all Javascript on the page. This can be intensive, requiring up to 6 cores for full performance, on some webpages. You may block scripts from running by settings `WEBSEARCH_JAVASCRIPT=false`. However, this will not block Javascript inlined in the HTML.
chat-ui/docs/source/configuration/web-search.md/0
{ "file_path": "chat-ui/docs/source/configuration/web-search.md", "repo_id": "chat-ui", "token_count": 836 }
64
<script lang="ts"> import CarbonContinue from "~icons/carbon/continue"; interface Props { classNames?: string; onClick?: () => void; } let { classNames = "", onClick }: Props = $props(); </script> <button type="button" onclick={onClick} class="btn flex h-8 rounded-lg border bg-white px-3 py-1 text-gray-500 shadow-sm transition-all hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-700 dark:text-gray-300 dark:hover:bg-gray-600 {classNames}" > <CarbonContinue class="mr-2 text-xs " /> Continue </button>
chat-ui/src/lib/components/ContinueBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/ContinueBtn.svelte", "repo_id": "chat-ui", "token_count": 190 }
65
<script lang="ts"> import { onMount, onDestroy } from "svelte"; interface Props { children?: import("svelte").Snippet; } let { children }: Props = $props(); let el: HTMLElement | undefined = $state(); onMount(() => { el?.ownerDocument.body.appendChild(el); }); onDestroy(() => { if (el?.parentNode) { el.parentNode.removeChild(el); } }); </script> <div bind:this={el} class="contents" hidden> {@render children?.()} </div>
chat-ui/src/lib/components/Portal.svelte/0
{ "file_path": "chat-ui/src/lib/components/Portal.svelte", "repo_id": "chat-ui", "token_count": 179 }
66
<script lang="ts"> import { createEventDispatcher } from "svelte"; import { base } from "$app/paths"; import { goto } from "$app/navigation"; import type { Model } from "$lib/types/Model"; import type { Assistant } from "$lib/types/Assistant"; import { useSettingsStore } from "$lib/stores/settings"; import { formatUserCount } from "$lib/utils/formatUserCount"; import IconGear from "~icons/bi/gear-fill"; import IconInternet from "../icons/IconInternet.svelte"; import CarbonExport from "~icons/carbon/export"; import CarbonCheckmark from "~icons/carbon/checkmark"; import CarbonRenew from "~icons/carbon/renew"; import CarbonUserMultiple from "~icons/carbon/user-multiple"; import CarbonTools from "~icons/carbon/tools"; import { share } from "$lib/utils/share"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; import { page } from "$app/state"; const publicConfig = usePublicConfig(); interface Props { models: Model[]; assistant: Pick< Assistant, | "avatar" | "name" | "rag" | "dynamicPrompt" | "modelId" | "createdByName" | "exampleInputs" | "_id" | "description" | "userCount" | "tools" >; } let { models, assistant }: Props = $props(); const dispatch = createEventDispatcher<{ message: string }>(); let hasRag = $derived( assistant?.rag?.allowAllDomains || (assistant?.rag?.allowedDomains?.length ?? 0) > 0 || (assistant?.rag?.allowedLinks?.length ?? 0) > 0 || assistant?.dynamicPrompt ); const prefix = publicConfig.PUBLIC_SHARE_PREFIX || `${publicConfig.PUBLIC_ORIGIN || page.url.origin}${base}`; let shareUrl = $derived(`${prefix}/assistant/${assistant?._id}`); let isCopied = $state(false); const settings = useSettingsStore(); </script> <div class="my-auto grid gap-8 lg:grid-cols-9"> <div class="lg:col-span-4"> <div> <div class="mb-3 flex items-center"> {#if assistant.avatar} <img src={`${base}/settings/assistants/${assistant._id.toString()}/avatar.jpg?hash=${ assistant.avatar }`} alt="avatar" class="mr-3 size-10 flex-none rounded-full object-cover" /> {:else} <div class="mr-3 flex size-10 flex-none items-center justify-center rounded-full bg-gray-300 object-cover text-xl font-bold uppercase text-gray-500 dark:bg-gray-600" > {assistant?.name[0]} </div> {/if} <div class="text-2xl font-semibold"> {assistant.name} </div> </div> <p class="line-clamp-5 text-sm leading-relaxed text-gray-600 dark:text-gray-400"> {assistant.description || "No description provided."} </p> <button onclick={() => { settings.instantSet({ activeModel: models[0].name, }); goto(`${base}/`); }} class="mt-4 inline-flex w-fit items-center rounded-md border border-gray-200 bg-gray-50 px-2 py-1 text-xs text-gray-500 hover:bg-gray-100 dark:border-gray-700 dark:bg-gray-800 dark:text-gray-400 dark:hover:bg-gray-700" > <CarbonRenew class="mr-1.5 text-xxs" /> Reset to default model </button> </div> </div> <div class="lg:col-span-5 lg:pl-12"> <div class="overflow-hidden rounded-xl border dark:border-gray-800"> <div class="flex flex-wrap items-center justify-between gap-2 p-3"> <div class="flex flex-wrap items-center gap-2"> <div class="hidden text-sm font-medium text-gray-600 dark:text-gray-400 sm:block"> About this Assistant </div> {#if assistant.createdByName} <span class="hidden text-gray-400 sm:block">•</span> <a class="text-sm text-gray-500 hover:underline" href="{base}/assistants?user={assistant.createdByName}" > {#if !import.meta.env.SSR && window.innerWidth < 640}By {/if}{assistant.createdByName} </a> {/if} </div> <div class="flex gap-1 self-start"> <button class="btn flex h-7 w-7 rounded-full bg-gray-100 p-1 text-xs hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:hover:bg-gray-600" onclick={() => { if (!isCopied) { share(shareUrl, assistant.name); isCopied = true; setTimeout(() => { isCopied = false; }, 2000); } }} title="Share assistant" > {#if isCopied} <CarbonCheckmark class="text-green-600" /> {:else} <CarbonExport /> {/if} </button> <a href="{base}/settings/assistants/{assistant._id.toString()}" aria-label="Settings" title="Settings" class="btn flex h-7 w-7 rounded-full bg-gray-100 p-1 text-xs hover:bg-gray-100 dark:border-gray-600 dark:bg-gray-800 dark:hover:bg-gray-600" ><IconGear /></a > </div> </div> <div class="grid gap-3 bg-gray-50 p-3 text-sm dark:bg-gray-800/70"> <div class="flex flex-wrap gap-2"> {#if hasRag} <div class="flex h-6 items-center gap-1 rounded-full bg-blue-500/10 pl-1.5 pr-2.5 text-xs" title="This assistant uses web search" > <IconInternet classNames="text-sm text-blue-600" /> Internet access </div> {/if} {#if assistant?.tools?.length} <div class="flex h-6 items-center gap-1 rounded-full bg-purple-500/10 pl-1.5 pr-2.5 text-xs" title="This assistant can use tools" > <CarbonTools class="text-sm text-purple-600" /> Has tools </div> {/if} {#if assistant.userCount && assistant.userCount > 1} <div class="flex h-6 items-center gap-1 rounded-full bg-gray-500/10 pl-1.5 pr-2.5 text-xs" title="Number of users" > <CarbonUserMultiple class="text-sm text-gray-600 dark:text-gray-400" /> {formatUserCount(assistant.userCount)} users </div> {/if} </div> </div> </div> </div> {#if assistant.exampleInputs && assistant.exampleInputs.length > 0} <div class="lg:col-span-9 lg:mt-6"> <p class="mb-3 text-center text-gray-600 dark:text-gray-300 lg:text-left">Examples</p> <div class="flex max-h-60 gap-2 overflow-x-auto pb-2 text-center scrollbar-thin scrollbar-thumb-gray-300 dark:scrollbar-thumb-gray-700 lg:grid lg:grid-cols-3 lg:overflow-y-auto lg:text-left" > {#each assistant.exampleInputs as example} <button type="button" class="flex-shrink-0 rounded-xl border bg-gray-50 px-2.5 py-2 text-sm text-gray-600 hover:bg-gray-100 dark:border-gray-800 dark:bg-gray-800 dark:text-gray-300 dark:hover:bg-gray-700 sm:px-3 lg:w-full xl:px-3.5 xl:text-base" onclick={() => dispatch("message", example)} > {example} </button> {/each} </div> </div> {/if} <div class="h-40 sm:h-24"></div> </div>
chat-ui/src/lib/components/chat/AssistantIntroduction.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/AssistantIntroduction.svelte", "repo_id": "chat-ui", "token_count": 2991 }
67
<script lang="ts"> interface Props { classNames?: string; } let { classNames = "" }: Props = $props(); </script> <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" class={classNames} fill="none" viewBox="0 0 26 23" > <path fill="url(#a)" d="M.93 10.65A10.17 10.17 0 0 1 11.11.48h4.67a9.45 9.45 0 0 1 0 18.89H4.53L1.62 22.2a.38.38 0 0 1-.69-.28V10.65Z" /> <path fill="#000" fill-rule="evenodd" d="M11.52 7.4a1.86 1.86 0 1 1-3.72 0 1.86 1.86 0 0 1 3.72 0Zm7.57 0a1.86 1.86 0 1 1-3.73 0 1.86 1.86 0 0 1 3.73 0ZM8.9 12.9a.55.55 0 0 0-.11.35.76.76 0 0 1-1.51 0c0-.95.67-1.94 1.76-1.94 1.09 0 1.76 1 1.76 1.94H9.3a.55.55 0 0 0-.12-.35c-.06-.07-.1-.08-.13-.08s-.08 0-.14.08Zm4.04 0a.55.55 0 0 0-.12.35h-1.51c0-.95.68-1.94 1.76-1.94 1.1 0 1.77 1 1.77 1.94h-1.51a.55.55 0 0 0-.12-.35c-.06-.07-.11-.08-.14-.08-.02 0-.07 0-.13.08Zm-1.89.79c-.02 0-.07-.01-.13-.08a.55.55 0 0 1-.12-.36h-1.5c0 .95.67 1.95 1.75 1.95 1.1 0 1.77-1 1.77-1.95h-1.51c0 .16-.06.28-.12.36-.06.07-.11.08-.14.08Zm4.04 0c-.03 0-.08-.01-.14-.08a.55.55 0 0 1-.12-.36h-1.5c0 .95.67 1.95 1.76 1.95 1.08 0 1.76-1 1.76-1.95h-1.51c0 .16-.06.28-.12.36-.06.07-.11.08-.13.08Zm1.76-.44c0-.16.05-.28.12-.35.06-.07.1-.08.13-.08s.08 0 .14.08c.06.07.11.2.11.35a.76.76 0 0 0 1.51 0c0-.95-.67-1.94-1.76-1.94-1.09 0-1.76 1-1.76 1.94h1.5Z" clip-rule="evenodd" /> <defs> <radialGradient id="a" cx="0" cy="0" r="1" gradientTransform="matrix(0 31.37 -34.85 0 13.08 -9.02)" gradientUnits="userSpaceOnUse" > <stop stop-color="#FFD21E" /> <stop offset="1" stop-color="red" /> </radialGradient> </defs> </svg>
chat-ui/src/lib/components/icons/IconDazzled.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconDazzled.svelte", "repo_id": "chat-ui", "token_count": 941 }
68
import { afterEach, assert, beforeAll, describe, expect, it } from "vitest"; import { migrations } from "./routines"; import { acquireLock, isDBLocked, refreshLock, releaseLock } from "./lock"; import { Semaphores } from "$lib/types/Semaphore"; import { collections } from "$lib/server/database"; describe( "migrations", { retry: 3, }, () => { beforeAll(async () => { try { await collections.semaphores.createIndex({ key: 1 }, { unique: true }); } catch (e) { // Index might already exist, ignore error } }); it("should not have duplicates guid", async () => { const guids = migrations.map((m) => m._id.toString()); const uniqueGuids = [...new Set(guids)]; expect(uniqueGuids.length).toBe(guids.length); }); it("should acquire only one lock on DB", async () => { const results = await Promise.all( new Array(1000).fill(0).map(() => acquireLock(Semaphores.TEST_MIGRATION)) ); const locks = results.filter((r) => r); const semaphores = await collections.semaphores.find({}).toArray(); expect(locks.length).toBe(1); expect(semaphores).toBeDefined(); expect(semaphores.length).toBe(1); expect(semaphores?.[0].key).toBe(Semaphores.TEST_MIGRATION); }); it("should read the lock correctly", async () => { const lockId = await acquireLock(Semaphores.TEST_MIGRATION); assert(lockId); expect(await isDBLocked(Semaphores.TEST_MIGRATION)).toBe(true); expect(!!(await acquireLock(Semaphores.TEST_MIGRATION))).toBe(false); await releaseLock(Semaphores.TEST_MIGRATION, lockId); expect(await isDBLocked(Semaphores.TEST_MIGRATION)).toBe(false); }); it("should refresh the lock", async () => { const lockId = await acquireLock(Semaphores.TEST_MIGRATION); assert(lockId); // get the updatedAt time const updatedAtInitially = (await collections.semaphores.findOne({}))?.updatedAt; await refreshLock(Semaphores.TEST_MIGRATION, lockId); const updatedAtAfterRefresh = (await collections.semaphores.findOne({}))?.updatedAt; expect(updatedAtInitially).toBeDefined(); expect(updatedAtAfterRefresh).toBeDefined(); expect(updatedAtInitially).not.toBe(updatedAtAfterRefresh); }); afterEach(async () => { await collections.semaphores.deleteMany({}); await collections.migrationResults.deleteMany({}); }); } );
chat-ui/src/lib/migrations/migrations.spec.ts/0
{ "file_path": "chat-ui/src/lib/migrations/migrations.spec.ts", "repo_id": "chat-ui", "token_count": 887 }
69
import Elysia from "elysia"; import { authenticateRequest } from "../auth"; export const authPlugin = new Elysia({ name: "auth" }).derive( { as: "scoped" }, async ({ headers, cookie, }): Promise<{ locals: App.Locals; }> => { const auth = await authenticateRequest( { type: "elysia", value: headers }, { type: "elysia", value: cookie }, true ); return { locals: { user: auth?.user, sessionId: auth?.sessionId, isAdmin: auth?.isAdmin, }, }; } );
chat-ui/src/lib/server/api/authPlugin.ts/0
{ "file_path": "chat-ui/src/lib/server/api/authPlugin.ts", "repo_id": "chat-ui", "token_count": 211 }
70
import { config } from "$lib/server/config"; import { z } from "zod"; import { sum } from "$lib/utils/sum"; import { embeddingEndpoints, embeddingEndpointSchema, type EmbeddingEndpoint, } from "$lib/server/embeddingEndpoints/embeddingEndpoints"; import { embeddingEndpointTransformersJS } from "$lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints"; import JSON5 from "json5"; const modelConfig = z.object({ /** Used as an identifier in DB */ id: z.string().optional(), /** Used to link to the model page, and for inference */ name: z.string().min(1), displayName: z.string().min(1).optional(), description: z.string().min(1).optional(), websiteUrl: z.string().url().optional(), modelUrl: z.string().url().optional(), endpoints: z.array(embeddingEndpointSchema).nonempty(), chunkCharLength: z.number().positive(), maxBatchSize: z.number().positive().optional(), preQuery: z.string().default(""), prePassage: z.string().default(""), }); // Default embedding model for backward compatibility const rawEmbeddingModelJSON = config.TEXT_EMBEDDING_MODELS || `[ { "name": "Xenova/gte-small", "chunkCharLength": 512, "endpoints": [ { "type": "transformersjs" } ] } ]`; const embeddingModelsRaw = z.array(modelConfig).parse(JSON5.parse(rawEmbeddingModelJSON)); const processEmbeddingModel = async (m: z.infer<typeof modelConfig>) => ({ ...m, id: m.id || m.name, }); const addEndpoint = (m: Awaited<ReturnType<typeof processEmbeddingModel>>) => ({ ...m, getEndpoint: async (): Promise<EmbeddingEndpoint> => { if (!m.endpoints) { return embeddingEndpointTransformersJS({ type: "transformersjs", weight: 1, model: m, }); } const totalWeight = sum(m.endpoints.map((e) => e.weight)); let random = Math.random() * totalWeight; for (const endpoint of m.endpoints) { if (random < endpoint.weight) { const args = { ...endpoint, model: m }; switch (args.type) { case "tei": return embeddingEndpoints.tei(args); case "transformersjs": return embeddingEndpoints.transformersjs(args); case "openai": return embeddingEndpoints.openai(args); case "hfapi": return embeddingEndpoints.hfapi(args); default: throw new Error(`Unknown endpoint type: ${args}`); } } random -= endpoint.weight; } throw new Error(`Failed to select embedding endpoint`); }, }); export const embeddingModels = await Promise.all( embeddingModelsRaw.map((e) => processEmbeddingModel(e).then(addEndpoint)) ); export const defaultEmbeddingModel = embeddingModels[0]; const validateEmbeddingModel = (_models: EmbeddingBackendModel[], key: "id" | "name") => { return z.enum([_models[0][key], ..._models.slice(1).map((m) => m[key])]); }; export const validateEmbeddingModelById = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "id"); }; export const validateEmbeddingModelByName = (_models: EmbeddingBackendModel[]) => { return validateEmbeddingModel(_models, "name"); }; export type EmbeddingBackendModel = typeof defaultEmbeddingModel;
chat-ui/src/lib/server/embeddingModels.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingModels.ts", "repo_id": "chat-ui", "token_count": 1114 }
71
import { config } from "$lib/server/config"; import type { Endpoint, EndpointMessage, TextGenerationStreamOutputWithToolsAndWebSources, } from "../endpoints"; import { z } from "zod"; import { createImageProcessorOptionsValidator, makeImageProcessor, type ImageProcessor, } from "../images"; import { findRepoRoot } from "$lib/server/findRepoRoot"; import { fileURLToPath } from "url"; import { dirname, join } from "path"; import { logger } from "$lib/server/logger"; import type { LlamaContextSequence } from "node-llama-cpp"; export const endpointLocalParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), modelPath: z.string().optional(), type: z.literal("local"), multimodal: z .object({ // Assumes IDEFICS image: createImageProcessorOptionsValidator({ supportedMimeTypes: ["image/jpeg", "image/webp"], preferredMimeType: "image/webp", maxSizeInMB: 5, maxWidth: 378, maxHeight: 980, }), }) .default({}), }); export async function endpointLocal( input: z.input<typeof endpointLocalParametersSchema> ): Promise<Endpoint> { // Parse and validate input const { modelPath: modelPathInput, multimodal, model, } = endpointLocalParametersSchema.parse(input); // Setup model path and folder const path = modelPathInput ?? `hf:${model.id ?? model.name}`; const modelFolder = config.MODELS_STORAGE_PATH || join(findRepoRoot(dirname(fileURLToPath(import.meta.url))), "models"); // Initialize Llama model const { getLlama, LlamaChatSession, resolveModelFile } = await import("node-llama-cpp"); const modelPath = await resolveModelFile(path, modelFolder); const llama = await getLlama({ logger: (level, message) => { switch (level) { case "fatal": logger.fatal(message); break; case "error": logger.error(message); break; case "warn": logger.warn(message); break; case "info": logger.info(message); break; case "log": logger.info(message); // Map 'log' to 'info' since pino doesn't have a 'log' level break; case "debug": logger.debug(message); break; default: break; } }, }); if (!llama) { throw new Error("Failed to initialize llama.cpp build."); } const modelLoaded = await llama.loadModel({ modelPath, }); // Create context and image processor const context = await modelLoaded.createContext({ sequences: 1 }); const imageProcessor = makeImageProcessor(multimodal.image); return async function ({ messages, preprompt, continueMessage, generateSettings, // tools, // toolResults, isMultimodal, }) { // Process messages and build prompt const processedMessages = await Promise.all( messages.map((msg) => prepareMessage(Boolean(isMultimodal), msg, imageProcessor)) ); let sequence: LlamaContextSequence; try { sequence = context.getSequence(); } catch (error) { logger.error(error, `Error getting sequence`); throw error; } const chatSession = new LlamaChatSession({ contextSequence: sequence, systemPrompt: preprompt, }); chatSession.setChatHistory( messages.slice(0, -1).map((message) => { switch (message.from) { case "user": return { type: "user", text: message.content, }; case "assistant": return { type: "model", response: [message.content], }; case "system": return { type: "system", text: message.content, }; } }) ); async function* generateTokens(): AsyncGenerator<TextGenerationStreamOutputWithToolsAndWebSources> { let tokenId = 0; let fullText = ""; // A simple queue for tokens that have been produced const queue: TextGenerationStreamOutputWithToolsAndWebSources[] = []; let waitingResolve: | ((value: TextGenerationStreamOutputWithToolsAndWebSources | null) => void) | null = null; let generationCompleted = false; // Helper function to push tokens to the queue function pushOutput(output: TextGenerationStreamOutputWithToolsAndWebSources) { if (waitingResolve) { waitingResolve(output); waitingResolve = null; } else { queue.push(output); } } const options = { maxTokens: generateSettings?.max_new_tokens, temperature: generateSettings?.temperature ?? 0.2, topP: generateSettings?.top_p ?? 0.9, topK: generateSettings?.top_k ?? 40, onTextChunk: (text: string) => { fullText += text; const output: TextGenerationStreamOutputWithToolsAndWebSources = { token: { id: tokenId++, text, logprob: 0, special: false, }, generated_text: null, details: null, }; // Instead of returning the token, push it into our queue. pushOutput(output); }, }; let generationPromise; if (!continueMessage) // Start the token generation process generationPromise = chatSession.prompt( processedMessages[processedMessages.length - 1].content, options ); else { generationPromise = chatSession.completePrompt( processedMessages[processedMessages.length - 1].content, options ); } try { // Yield tokens as they become available while (!generationCompleted || queue.length > 0) { if (queue.length === 0) { const output = await new Promise<TextGenerationStreamOutputWithToolsAndWebSources | null>( (resolve) => (waitingResolve = resolve) ); // When output is null, it indicates generation completion. if (output === null || !output.token.text) break; if (model.parameters.stop_sequences?.includes(output.token.text)) { break; } yield output; } else { const output = queue.shift(); if (output) yield output; } } // Wait for the generation process to complete (and catch errors if any) await generationPromise.finally(() => { generationCompleted = true; // Resolve any pending waiters so the loop can end. if (waitingResolve) { waitingResolve(null); waitingResolve = null; } }); // Yield a final token that contains the full generated text. yield { token: { id: tokenId, text: "", logprob: 0, special: true, }, generated_text: fullText, details: null, }; } catch (error) { logger.error(error, `Generation error`); // Ensure we clean up the LlamaManager in case of errors throw error; } } return generateTokens(); }; } async function prepareMessage( isMultimodal: boolean, message: EndpointMessage, imageProcessor: ImageProcessor ): Promise<EndpointMessage> { if (!isMultimodal) return message; const files = await Promise.all(message.files?.map(imageProcessor) ?? []); const markdowns = files.map( (file) => `![](data:${file.mime};base64,${file.image.toString("base64")})` ); const content = message.content + "\n" + markdowns.join("\n "); return { ...message, content }; }
chat-ui/src/lib/server/endpoints/local/endpointLocal.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/local/endpointLocal.ts", "repo_id": "chat-ui", "token_count": 2768 }
72
import { config } from "$lib/server/config"; import { generateFromDefaultEndpoint } from "$lib/server/generateFromDefaultEndpoint"; import { logger } from "$lib/server/logger"; import { MessageUpdateType, type MessageUpdate } from "$lib/types/MessageUpdate"; import type { Conversation } from "$lib/types/Conversation"; import { getReturnFromGenerator } from "$lib/utils/getReturnFromGenerator"; import { taskModel } from "../models"; import type { Tool } from "$lib/types/Tool"; import { getToolOutput } from "../tools/getToolOutput"; export async function* generateTitleForConversation( conv: Conversation ): AsyncGenerator<MessageUpdate, undefined, undefined> { try { const userMessage = conv.messages.find((m) => m.from === "user"); // HACK: detect if the conversation is new if (conv.title !== "New Chat" || !userMessage) return; const prompt = userMessage.content; const title = (await generateTitle(prompt)) ?? "New Chat"; yield { type: MessageUpdateType.Title, title, }; } catch (cause) { logger.error(Error("Failed whilte generating title for conversation", { cause })); } } export async function generateTitle(prompt: string) { if (config.LLM_SUMMARIZATION !== "true") { return prompt.split(/\s+/g).slice(0, 5).join(" "); } if (taskModel.tools) { const titleTool = { name: "title", description: "Submit a title for the conversation so far. Do not try to answer the user question or the tool will fail.", inputs: [ { name: "title", type: "str", description: "The title for the conversation. It should be 5 words or less and start with a unicode emoji relevant to the query.", }, ], } as unknown as Tool; const endpoint = await taskModel.getEndpoint(); const title = await getToolOutput({ messages: [ { from: "user" as const, content: prompt, }, ], preprompt: "The task is to generate conversation titles based on text snippets. You'll never answer the provided question directly, but instead summarize the user's request into a short title.", tool: titleTool, endpoint, }); if (title) { if (!/\p{Emoji}/u.test(title.slice(0, 3))) { return "💬 " + title; } return title; } } return await getReturnFromGenerator( generateFromDefaultEndpoint({ messages: [{ from: "user", content: prompt }], preprompt: "You are a summarization AI. Summarize the user's request into a single short sentence of four words or less. Do not try to answer it, only summarize the user's query. Always start your answer with an emoji relevant to the summary", generateSettings: { max_new_tokens: 30, }, }) ) .then((summary) => { // add an emoji if none is found in the first three characters if (!/\p{Emoji}/u.test(summary.slice(0, 3))) { return "💬 " + summary; } return summary; }) .catch((e) => { logger.error(e); return null; }); }
chat-ui/src/lib/server/textGeneration/title.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/title.ts", "repo_id": "chat-ui", "token_count": 1028 }
73
import type { SerializedHTMLElement } from "../scrape/types"; import { htmlElementToMarkdownElements, mergeAdjacentElements } from "./fromHtml"; import type { HeaderElement, MarkdownElement } from "./types"; import { MarkdownElementType } from "./types"; import { chunkElements } from "./utils/chunk"; /** * Converts HTML elements to Markdown elements and creates a tree based on header tags * For example: h1 [h2 [p p blockquote] h2 [h3 [...] ] ] **/ export function htmlToMarkdownTree( title: string, htmlElements: SerializedHTMLElement[], maxCharsPerElem: number ): HeaderElement { let parent: HeaderElement = { type: MarkdownElementType.Header, level: 1, parent: null, content: title, children: [], }; const markdownElements = chunkElements( mergeAdjacentElements( htmlElements.flatMap((elem) => htmlElementToMarkdownElements(parent, elem)) ), maxCharsPerElem ); for (const elem of markdownElements) { if (elem.type !== MarkdownElementType.Header) { elem.parent = parent; parent.children.push(elem); continue; } // add 1 to current level to offset for the title being level 1 elem.level += 1; // Pop up header levels until reaching the same level as the current header // or until we reach the root inner: while (parent !== null && parent.parent !== null) { if (parent.level < elem.level) break inner; parent = parent.parent; } parent.children.push(elem); parent = elem; } // Pop up to the root while (parent.parent !== null) { parent = parent.parent; } return parent; } export function removeParents<T extends MarkdownElement>(elem: T): T { if ("children" in elem) { return { ...elem, parent: null, children: elem.children.map((child) => removeParents(child)) }; } return { ...elem, parent: null }; }
chat-ui/src/lib/server/websearch/markdown/tree.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/tree.ts", "repo_id": "chat-ui", "token_count": 613 }
74
import { config } from "$lib/server/config"; import type { WebSearchSource } from "$lib/types/WebSearch"; export default async function search(query: string): Promise<WebSearchSource[]> { const params = { q: query, hl: "en", gl: "us", }; const response = await fetch("https://google.serper.dev/search", { method: "POST", body: JSON.stringify(params), headers: { "x-api-key": config.SERPER_API_KEY, "Content-type": "application/json", }, }); /* eslint-disable @typescript-eslint/no-explicit-any */ const data = (await response.json()) as Record<string, any>; if (!response.ok) { throw new Error( data["message"] ?? `Serper API returned error code ${response.status} - ${response.statusText}` ); } return data["organic"] ?? []; }
chat-ui/src/lib/server/websearch/search/endpoints/serper.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/serper.ts", "repo_id": "chat-ui", "token_count": 281 }
75
import type { ObjectId } from "mongodb"; import type { User } from "./User"; import type { Timestamps } from "./Timestamps"; import type { ReviewStatus } from "./Review"; export interface Assistant extends Timestamps { _id: ObjectId; createdById: User["_id"] | string; // user id or session createdByName?: User["username"]; avatar?: string; name: string; description?: string; modelId: string; exampleInputs: string[]; preprompt: string; userCount?: number; review: ReviewStatus; rag?: { allowAllDomains: boolean; allowedDomains: string[]; allowedLinks: string[]; }; generateSettings?: { temperature?: number; top_p?: number; repetition_penalty?: number; top_k?: number; }; dynamicPrompt?: boolean; searchTokens: string[]; last24HoursCount: number; tools?: string[]; } // eslint-disable-next-line no-shadow export enum SortKey { POPULAR = "popular", TRENDING = "trending", }
chat-ui/src/lib/types/Assistant.ts/0
{ "file_path": "chat-ui/src/lib/types/Assistant.ts", "repo_id": "chat-ui", "token_count": 318 }
76
import type { Conversation } from "./Conversation"; export type SharedConversation = Pick< Conversation, | "model" | "embeddingModel" | "title" | "rootMessageId" | "messages" | "preprompt" | "assistantId" | "createdAt" | "updatedAt" > & { _id: string; hash: string; };
chat-ui/src/lib/types/SharedConversation.ts/0
{ "file_path": "chat-ui/src/lib/types/SharedConversation.ts", "repo_id": "chat-ui", "token_count": 114 }
77
export function getHref( url: URL | string, modifications: { newKeys?: Record<string, string | undefined | null>; existingKeys?: { behaviour: "delete_except" | "delete"; keys: string[] }; } ) { const newUrl = new URL(url); const { newKeys, existingKeys } = modifications; // exsiting keys logic if (existingKeys) { const { behaviour, keys } = existingKeys; if (behaviour === "delete") { for (const key of keys) { newUrl.searchParams.delete(key); } } else { // delete_except const keysToPreserve = keys; for (const key of [...newUrl.searchParams.keys()]) { if (!keysToPreserve.includes(key)) { newUrl.searchParams.delete(key); } } } } // new keys logic if (newKeys) { for (const [key, val] of Object.entries(newKeys)) { if (val) { newUrl.searchParams.set(key, val); } else { newUrl.searchParams.delete(key); } } } return newUrl.toString(); }
chat-ui/src/lib/utils/getHref.ts/0
{ "file_path": "chat-ui/src/lib/utils/getHref.ts", "repo_id": "chat-ui", "token_count": 373 }
78
import { browser } from "$app/environment"; import { isDesktop } from "./isDesktop"; export async function share(url: string, title: string, appendLeafId: boolean = false) { if (!browser) return; // Retrieve the leafId from localStorage const leafId = localStorage.getItem("leafId"); if (appendLeafId && leafId) { // Use URL and URLSearchParams to add the leafId parameter const shareUrl = new URL(url); shareUrl.searchParams.append("leafId", leafId); url = shareUrl.toString(); } if (navigator.share && !isDesktop(window)) { navigator.share({ url, title }); } else { // this is really ugly // but on chrome the clipboard write doesn't work if the window isn't focused // and after we use confirm() to ask the user if they want to share, the window is no longer focused // for a few ms until the confirm dialog closes. tried await tick(), tried window.focus(), didnt work // bug doesnt occur in firefox, if you can find a better fix for it please do await new Promise((resolve) => setTimeout(resolve, 250)); await navigator.clipboard.writeText(url); } }
chat-ui/src/lib/utils/share.ts/0
{ "file_path": "chat-ui/src/lib/utils/share.ts", "repo_id": "chat-ui", "token_count": 331 }
79
import { describe, expect, it } from "vitest"; import { isMessageId } from "./isMessageId"; import { v4 } from "uuid"; describe("isMessageId", () => { it("should return true for a valid message id", () => { expect(isMessageId(v4())).toBe(true); }); it("should return false for an invalid message id", () => { expect(isMessageId("1-2-3-4")).toBe(false); }); it("should return false for an empty string", () => { expect(isMessageId("")).toBe(false); }); });
chat-ui/src/lib/utils/tree/isMessageId.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/isMessageId.spec.ts", "repo_id": "chat-ui", "token_count": 170 }
80
import { authCondition } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { defaultModel } from "$lib/server/models.js"; import { error } from "@sveltejs/kit"; import { ObjectId } from "mongodb"; export async function POST({ params, locals }) { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.id), }); if (!assistant) { return error(404, "Assistant not found"); } // don't push if it's already there const settings = await collections.settings.findOne(authCondition(locals)); if (settings?.assistants?.includes(assistant._id)) { return error(400, "Already subscribed"); } const result = await collections.settings.updateOne(authCondition(locals), { $addToSet: { assistants: assistant._id }, $set: { activeModel: assistant._id.toString() }, }); // reduce count only if push succeeded if (result.modifiedCount > 0) { await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: 1 } }); } return new Response("Assistant subscribed", { status: 200 }); } export async function DELETE({ params, locals }) { const assistant = await collections.assistants.findOne({ _id: new ObjectId(params.id), }); if (!assistant) { return error(404, "Assistant not found"); } const result = await collections.settings.updateOne(authCondition(locals), { $pull: { assistants: assistant._id }, }); // reduce count only if pull succeeded if (result.modifiedCount > 0) { await collections.assistants.updateOne({ _id: assistant._id }, { $inc: { userCount: -1 } }); } const settings = await collections.settings.findOne(authCondition(locals)); // if the assistant was the active model, set the default model as active if (settings?.activeModel === assistant._id.toString()) { await collections.settings.updateOne(authCondition(locals), { $set: { activeModel: defaultModel.id }, }); } return new Response("Assistant unsubscribed", { status: 200 }); }
chat-ui/src/routes/api/assistant/[id]/subscribe/+server.ts/0
{ "file_path": "chat-ui/src/routes/api/assistant/[id]/subscribe/+server.ts", "repo_id": "chat-ui", "token_count": 607 }
81
<script lang="ts"> import { page } from "$app/state"; import { base } from "$app/paths"; import { goto } from "$app/navigation"; import { onMount } from "svelte"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; const publicConfig = usePublicConfig(); import ChatWindow from "$lib/components/chat/ChatWindow.svelte"; import { findCurrentModel } from "$lib/utils/models"; import { useSettingsStore } from "$lib/stores/settings"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; import { pendingMessage } from "$lib/stores/pendingMessage"; let { data } = $props(); let loading = $state(false); let files: File[] = $state([]); const settings = useSettingsStore(); const modelId = page.params.model; async function createConversation(message: string) { try { loading = true; const res = await fetch(`${base}/conversation`, { method: "POST", headers: { "Content-Type": "application/json", }, body: JSON.stringify({ model: data.assistant.modelId, assistantId: data.assistant._id, }), }); if (!res.ok) { error.set("Error while creating conversation, try again."); console.error("Error while creating conversation: " + (await res.text())); return; } const { conversationId } = await res.json(); // Ugly hack to use a store as temp storage, feel free to improve ^^ pendingMessage.set({ content: message, files, }); // invalidateAll to update list of conversations await goto(`${base}/conversation/${conversationId}`, { invalidateAll: true }); } catch (err) { error.set(ERROR_MESSAGES.default); console.error(err); } finally { loading = false; } } onMount(async () => { settings.instantSet({ activeModel: modelId, }); const query = page.url.searchParams.get("q"); if (query) createConversation(query); }); </script> <svelte:head> <meta property="og:title" content={data.assistant.name + " - " + publicConfig.PUBLIC_APP_NAME} /> <meta property="og:type" content="link" /> <meta property="og:description" content={`Use the ${data.assistant.name} assistant inside of ${publicConfig.PUBLIC_APP_NAME}`} /> <meta property="og:image" content="{publicConfig.PUBLIC_ORIGIN || page.url.origin}{base}/assistant/{data.assistant ._id}/thumbnail.png" /> <meta property="og:url" content={page.url.href} /> <meta name="twitter:card" content="summary_large_image" /> </svelte:head> <ChatWindow on:message={(ev) => createConversation(ev.detail)} {loading} currentModel={findCurrentModel([...data.models, ...data.oldModels], data.assistant.modelId)} assistant={data.assistant} models={data.models} bind:files />
chat-ui/src/routes/assistant/[assistantId]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/assistant/[assistantId]/+page.svelte", "repo_id": "chat-ui", "token_count": 983 }
82
import { getOIDCAuthorizationUrl } from "$lib/server/auth"; import { base } from "$app/paths"; import { config } from "$lib/server/config"; export async function GET({ request, url, locals }) { const referer = request.headers.get("referer"); let redirectURI = `${(referer ? new URL(referer) : url).origin}${base}/login/callback`; // TODO: Handle errors if provider is not responding if (url.searchParams.has("callback")) { const callback = url.searchParams.get("callback") || redirectURI; if (config.ALTERNATIVE_REDIRECT_URLS.includes(callback)) { redirectURI = callback; } } const authorizationUrl = await getOIDCAuthorizationUrl( { redirectURI }, { sessionId: locals.sessionId } ); return new Response(null, { status: 302, headers: { Location: authorizationUrl, }, }); }
chat-ui/src/routes/login/+server.ts/0
{ "file_path": "chat-ui/src/routes/login/+server.ts", "repo_id": "chat-ui", "token_count": 270 }
83
<script lang="ts"> import { page } from "$app/state"; import { base } from "$app/paths"; import type { BackendModel } from "$lib/server/models"; import { useSettingsStore } from "$lib/stores/settings"; import CopyToClipBoardBtn from "$lib/components/CopyToClipBoardBtn.svelte"; import TokensCounter from "$lib/components/TokensCounter.svelte"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import CarbonLink from "~icons/carbon/link"; import CarbonChat from "~icons/carbon/chat"; import CarbonCode from "~icons/carbon/code"; import { goto } from "$app/navigation"; import { usePublicConfig } from "$lib/utils/PublicConfig.svelte"; const publicConfig = usePublicConfig(); const settings = useSettingsStore(); $effect(() => { if ($settings.customPrompts[page.params.model] === undefined) { $settings.customPrompts = { ...$settings.customPrompts, [page.params.model]: page.data.models.find((el: BackendModel) => el.id === page.params.model)?.preprompt || "", }; } }); let hasCustomPreprompt = $derived( $settings.customPrompts[page.params.model] !== page.data.models.find((el: BackendModel) => el.id === page.params.model)?.preprompt ); let model = $derived(page.data.models.find((el: BackendModel) => el.id === page.params.model)); </script> <div class="flex flex-col items-start"> <div class="mb-5 flex flex-col gap-1.5"> <h2 class="text-lg font-semibold md:text-xl"> {page.params.model} </h2> {#if model.description} <p class="whitespace-pre-wrap text-gray-600"> {model.description} </p> {/if} </div> <div class="flex flex-wrap items-center gap-2 md:gap-4"> {#if model.modelUrl} <a href={model.modelUrl || "https://huggingface.co/" + model.name} target="_blank" rel="noreferrer" class="flex items-center truncate underline underline-offset-2" > <CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " /> Model page </a> {/if} {#if model.datasetName || model.datasetUrl} <a href={model.datasetUrl || "https://huggingface.co/datasets/" + model.datasetName} target="_blank" rel="noreferrer" class="flex items-center truncate underline underline-offset-2" > <CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " /> Dataset page </a> {/if} {#if model.websiteUrl} <a href={model.websiteUrl} target="_blank" class="flex items-center truncate underline underline-offset-2" rel="noreferrer" > <CarbonArrowUpRight class="mr-1.5 shrink-0 text-xs " /> Model website </a> {/if} {#if model.hasInferenceAPI} <a href={"https://huggingface.co/playground?modelId=" + model.name} target="_blank" rel="noreferrer" class="flex items-center truncate underline underline-offset-2" > <CarbonCode class="mr-1.5 shrink-0 text-xs " /> API Playground </a> {/if} <CopyToClipBoardBtn value="{publicConfig.PUBLIC_ORIGIN || page.url.origin}{base}/models/{model.id}" classNames="!border-none !shadow-none !py-0 !px-1 !rounded-md" > <div class="flex items-center gap-1.5 hover:underline"> <CarbonLink />Copy direct link to model </div> </CopyToClipBoardBtn> </div> <button class="my-2 flex w-fit items-center rounded-full bg-black px-3 py-1 text-base !text-white" name="Activate model" onclick={(e) => { e.stopPropagation(); settings.instantSet({ activeModel: page.params.model, }); goto(`${base}/`); }} > <CarbonChat class="mr-1.5 text-sm" /> New chat </button> <div class="relative flex w-full flex-col gap-2"> <div class="flex w-full flex-row content-between"> <h3 class="mb-1.5 text-lg font-semibold text-gray-800">System Prompt</h3> {#if hasCustomPreprompt} <button class="ml-auto underline decoration-gray-300 hover:decoration-gray-700" onclick={(e) => { e.stopPropagation(); $settings.customPrompts[page.params.model] = model.preprompt; }} > Reset </button> {/if} </div> <textarea aria-label="Custom system prompt" rows="10" class="w-full resize-none rounded-md border-2 bg-gray-100 p-2" bind:value={$settings.customPrompts[page.params.model]} ></textarea> {#if model.tokenizer && $settings.customPrompts[page.params.model]} <TokensCounter classNames="absolute bottom-2 right-2" prompt={$settings.customPrompts[page.params.model]} modelTokenizer={model.tokenizer} truncate={model?.parameters?.truncate} /> {/if} </div> </div>
chat-ui/src/routes/settings/(nav)/[...model]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/[...model]/+page.svelte", "repo_id": "chat-ui", "token_count": 1894 }
84
import { useAPIClient, handleResponse } from "$lib/APIClient"; export const load = async ({ params, fetch }) => { const client = useAPIClient({ fetch }); const data = client .tools({ id: params.toolId, }) .get() .then(handleResponse); return { tool: await data }; };
chat-ui/src/routes/tools/[toolId]/+layout.ts/0
{ "file_path": "chat-ui/src/routes/tools/[toolId]/+layout.ts", "repo_id": "chat-ui", "token_count": 101 }
85
<p align="center"> <picture> <source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-dark.svg"> <source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-light.svg"> <img alt="Hugging Face Datasets Library" src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/datasets-logo-light.svg" width="352" height="59" style="max-width: 100%;"> </picture> <br/> <br/> </p> <p align="center"> <a href="https://github.com/huggingface/datasets/actions/workflows/ci.yml?query=branch%3Amain"><img alt="Build" src="https://github.com/huggingface/datasets/actions/workflows/ci.yml/badge.svg?branch=main"></a> <a href="https://github.com/huggingface/datasets/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/datasets.svg?color=blue"></a> <a href="https://huggingface.co/docs/datasets/index.html"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/datasets/index.html.svg?down_color=red&down_message=offline&up_message=online"></a> <a href="https://github.com/huggingface/datasets/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/datasets.svg"></a> <a href="https://huggingface.co/datasets/"><img alt="Number of datasets" src="https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/datasets&color=brightgreen"></a> <a href="CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-2.0-4baaaa.svg"></a> <a href="https://zenodo.org/badge/latestdoi/250213286"><img src="https://zenodo.org/badge/250213286.svg" alt="DOI"></a> </p> 🤗 Datasets is a lightweight library providing **two** main features: - **one-line dataloaders for many public datasets**: one-liners to download and pre-process any of the ![number of datasets](https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/datasets&color=brightgreen) major public datasets (image datasets, audio datasets, text datasets in 467 languages and dialects, etc.) provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets). With a simple command like `squad_dataset = load_dataset("rajpurkar/squad")`, get any of these datasets ready to use in a dataloader for training/evaluating a ML model (Numpy/Pandas/PyTorch/TensorFlow/JAX), - **efficient data pre-processing**: simple, fast and reproducible data pre-processing for the public datasets as well as your own local datasets in CSV, JSON, text, PNG, JPEG, WAV, MP3, Parquet, etc. With simple commands like `processed_dataset = dataset.map(process_example)`, efficiently prepare the dataset for inspection and ML model evaluation and training. [🎓 **Documentation**](https://huggingface.co/docs/datasets/) [🔎 **Find a dataset in the Hub**](https://huggingface.co/datasets) [🌟 **Share a dataset on the Hub**](https://huggingface.co/docs/datasets/share) <h3 align="center"> <a href="https://hf.co/course"><img src="https://raw.githubusercontent.com/huggingface/datasets/main/docs/source/imgs/course_banner.png"></a> </h3> 🤗 Datasets is designed to let the community easily add and share new datasets. 🤗 Datasets has many additional interesting features: - Thrive on large datasets: 🤗 Datasets naturally frees the user from RAM memory limitation, all datasets are memory-mapped using an efficient zero-serialization cost backend (Apache Arrow). - Smart caching: never wait for your data to process several times. - Lightweight and fast with a transparent and pythonic API (multi-processing/caching/memory-mapping). - Built-in interoperability with NumPy, PyTorch, TensorFlow 2, JAX, Pandas, Polars and more. - Native support for audio, image and video data. - Enable streaming mode to save disk space and start iterating over the dataset immediately. 🤗 Datasets originated from a fork of the awesome [TensorFlow Datasets](https://github.com/tensorflow/datasets) and the HuggingFace team want to deeply thank the TensorFlow Datasets team for building this amazing library. # Installation ## With pip 🤗 Datasets can be installed from PyPi and has to be installed in a virtual environment (venv or conda for instance) ```bash pip install datasets ``` ## With conda 🤗 Datasets can be installed using conda as follows: ```bash conda install -c huggingface -c conda-forge datasets ``` Follow the installation pages of TensorFlow and PyTorch to see how to install them with conda. For more details on installation, check the installation page in the documentation: https://huggingface.co/docs/datasets/installation ## Installation to use with Machine Learning & Data frameworks frameworks If you plan to use 🤗 Datasets with PyTorch (2.0+), TensorFlow (2.6+) or JAX (3.14+) you should also install PyTorch, TensorFlow or JAX. 🤗 Datasets is also well integrated with data frameworks like PyArrow, Pandas, Polars and Spark, which should be installed separately. For more details on using the library with these frameworks, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart # Usage 🤗 Datasets is made to be very simple to use - the API is centered around a single function, `datasets.load_dataset(dataset_name, **kwargs)`, that instantiates a dataset. This library can be used for text/image/audio/etc. datasets. Here is an example to load a text dataset: Here is a quick example: ```python from datasets import load_dataset # Print all the available datasets from huggingface_hub import list_datasets print([dataset.id for dataset in list_datasets()]) # Load a dataset and print the first example in the training set squad_dataset = load_dataset('rajpurkar/squad') print(squad_dataset['train'][0]) # Process the dataset - add a column with the length of the context texts dataset_with_length = squad_dataset.map(lambda x: {"length": len(x["context"])}) # Process the dataset - tokenize the context texts (using a tokenizer from the 🤗 Transformers library) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('bert-base-cased') tokenized_dataset = squad_dataset.map(lambda x: tokenizer(x['context']), batched=True) ``` If your dataset is bigger than your disk or if you don't want to wait to download the data, you can use streaming: ```python # If you want to use the dataset immediately and efficiently stream the data as you iterate over the dataset image_dataset = load_dataset('timm/imagenet-1k-wds', streaming=True) for example in image_dataset["train"]: break ``` For more details on using the library, check the quick start page in the documentation: https://huggingface.co/docs/datasets/quickstart and the specific pages on: - Loading a dataset: https://huggingface.co/docs/datasets/loading - What's in a Dataset: https://huggingface.co/docs/datasets/access - Processing data with 🤗 Datasets: https://huggingface.co/docs/datasets/process - Processing audio data: https://huggingface.co/docs/datasets/audio_process - Processing image data: https://huggingface.co/docs/datasets/image_process - Processing text data: https://huggingface.co/docs/datasets/nlp_process - Streaming a dataset: https://huggingface.co/docs/datasets/stream - etc. # Add a new dataset to the Hub We have a very detailed step-by-step guide to add a new dataset to the ![number of datasets](https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/datasets&color=brightgreen) datasets already provided on the [HuggingFace Datasets Hub](https://huggingface.co/datasets). You can find: - [how to upload a dataset to the Hub using your web browser or Python](https://huggingface.co/docs/datasets/upload_dataset) and also - [how to upload it using Git](https://huggingface.co/docs/datasets/share). # Disclaimers You can use 🤗 Datasets to load datasets based on versioned git repositories maintained by the dataset authors. For reproducibility reasons, we ask users to pin the `revision` of the repositories they use. If you're a dataset owner and wish to update any part of it (description, citation, license, etc.), or do not want your dataset to be included in the Hugging Face Hub, please get in touch by opening a discussion or a pull request in the Community tab of the dataset page. Thanks for your contribution to the ML community! ## BibTeX If you want to cite our 🤗 Datasets library, you can use our [paper](https://arxiv.org/abs/2109.02846): ```bibtex @inproceedings{lhoest-etal-2021-datasets, title = "Datasets: A Community Library for Natural Language Processing", author = "Lhoest, Quentin and Villanova del Moral, Albert and Jernite, Yacine and Thakur, Abhishek and von Platen, Patrick and Patil, Suraj and Chaumond, Julien and Drame, Mariama and Plu, Julien and Tunstall, Lewis and Davison, Joe and {\v{S}}a{\v{s}}ko, Mario and Chhablani, Gunjan and Malik, Bhavitvya and Brandeis, Simon and Le Scao, Teven and Sanh, Victor and Xu, Canwen and Patry, Nicolas and McMillan-Major, Angelina and Schmid, Philipp and Gugger, Sylvain and Delangue, Cl{\'e}ment and Matussi{\`e}re, Th{\'e}o and Debut, Lysandre and Bekman, Stas and Cistac, Pierric and Goehringer, Thibault and Mustar, Victor and Lagunas, Fran{\c{c}}ois and Rush, Alexander and Wolf, Thomas", booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations", month = nov, year = "2021", address = "Online and Punta Cana, Dominican Republic", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2021.emnlp-demo.21", pages = "175--184", abstract = "The scale, variety, and quantity of publicly-available NLP datasets has grown rapidly as researchers propose new tasks, larger models, and novel benchmarks. Datasets is a community library for contemporary NLP designed to support this ecosystem. Datasets aims to standardize end-user interfaces, versioning, and documentation, while providing a lightweight front-end that behaves similarly for small datasets as for internet-scale corpora. The design of the library incorporates a distributed, community-driven approach to adding datasets and documenting usage. After a year of development, the library now includes more than 650 unique datasets, has more than 250 contributors, and has helped support a variety of novel cross-dataset research projects and shared tasks. The library is available at https://github.com/huggingface/datasets.", eprint={2109.02846}, archivePrefix={arXiv}, primaryClass={cs.CL}, } ``` If you need to cite a specific version of our 🤗 Datasets library for reproducibility, you can use the corresponding version Zenodo DOI from this [list](https://zenodo.org/search?q=conceptrecid:%224817768%22&sort=-version&all_versions=True).
datasets/README.md/0
{ "file_path": "datasets/README.md", "repo_id": "datasets", "token_count": 3640 }
86
# docstyle-ignore INSTALL_CONTENT = """ # Datasets installation ! pip install datasets transformers # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/datasets.git """ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}] default_branch_name = "main" version_prefix = ""
datasets/docs/source/_config.py/0
{ "file_path": "datasets/docs/source/_config.py", "repo_id": "datasets", "token_count": 118 }
87
# Create a dataset card Each dataset should have a dataset card to promote responsible usage and inform users of any potential biases within the dataset. This idea was inspired by the Model Cards proposed by [Mitchell, 2018](https://arxiv.org/abs/1810.03993). Dataset cards help users understand a dataset's contents, the context for using the dataset, how it was created, and any other considerations a user should be aware of. Creating a dataset card is easy and can be done in just a few steps: 1. Go to your dataset repository on the [Hub](https://hf.co/new-dataset) and click on **Create Dataset Card** to create a new `README.md` file in your repository. 2. Use the **Metadata UI** to select the tags that describe your dataset. You can add a license, language, pretty_name, the task_categories, size_categories, and any other tags that you think are relevant. These tags help users discover and find your dataset on the Hub. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/datasets-metadata-ui.png"/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/hub/datasets-metadata-ui-dark.png"/> </div> <Tip> For a complete, but not required, set of tag options you can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1). This'll have a few more tag options like `multilinguality` and `language_creators` which are useful but not absolutely necessary. </Tip> 3. Click on the **Import dataset card template** link to automatically create a template with all the relevant fields to complete. Fill out the template sections to the best of your ability. Take a look at the [Dataset Card Creation Guide](https://github.com/huggingface/datasets/blob/main/templates/README_guide.md) for more detailed information about what to include in each section of the card. For fields you are unable to complete, you can write **[More Information Needed]**. 4. Once you're done, commit the changes to the `README.md` file and you'll see the completed dataset card on your repository. YAML also allows you to customize the way your dataset is loaded by [defining splits and/or configurations](./repository_structure#define-your-splits-and-subsets-in-yaml) without the need to write any code. Feel free to take a look at the [SNLI](https://huggingface.co/datasets/snli), [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail), and [Allociné](https://huggingface.co/datasets/allocine) dataset cards as examples to help you get started.
datasets/docs/source/dataset_card.mdx/0
{ "file_path": "datasets/docs/source/dataset_card.mdx", "repo_id": "datasets", "token_count": 756 }
88
# Load Your data can be stored in various places; they can be on your local machine's disk, in a Github repository, and in in-memory data structures like Python dictionaries and Pandas DataFrames. Wherever a dataset is stored, 🤗 Datasets can help you load it. This guide will show you how to load a dataset from: - The Hugging Face Hub - Local files - In-memory data - Offline - A specific slice of a split For more details specific to loading other dataset modalities, take a look at the <a class="underline decoration-pink-400 decoration-2 font-semibold" href="./audio_load">load audio dataset guide</a>, the <a class="underline decoration-yellow-400 decoration-2 font-semibold" href="./image_load">load image dataset guide</a>, the <a class="underline decoration-blue-400 decoration-2 font-semibold" href="./video_load">load video dataset guide</a>, or the <a class="underline decoration-green-400 decoration-2 font-semibold" href="./nlp_load">load text dataset guide</a>. <a id='load-from-the-hub'></a> ## Hugging Face Hub You can also load a dataset from any dataset repository on the Hub! Begin by [creating a dataset repository](share#create-the-repository) and upload your data files. Now you can use the [`load_dataset`] function to load the dataset. For example, try loading the files from this [demo repository](https://huggingface.co/datasets/lhoestq/demo1) by providing the repository namespace and dataset name. This dataset repository contains CSV files, and the code below loads the dataset from the CSV files: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("lhoestq/demo1") ``` Some datasets may have more than one version based on Git tags, branches, or commits. Use the `revision` parameter to specify the dataset version you want to load: ```py >>> dataset = load_dataset( ... "lhoestq/custom_squad", ... revision="main" # tag name, or branch name, or commit hash ... ) ``` <Tip> Refer to the [Upload a dataset to the Hub](./upload_dataset) tutorial for more details on how to create a dataset repository on the Hub, and how to upload your data files. </Tip> A dataset loads by default all the data into the `train` split, or checks for mentions or split names in the data files names (e.g. "train", "test" and "validation"). Use the `data_files` parameter to map data files to splits like `train`, `validation` and `test`: ```py >>> data_files = {"train": "train.csv", "test": "test.csv"} >>> dataset = load_dataset("namespace/your_dataset_name", data_files=data_files) ``` <Tip warning={true}> If you don't specify which data files to use, [`load_dataset`] will return all the data files. This can take a long time if you load a large dataset like C4, which is approximately 13TB of data. </Tip> You can also load a specific subset of the files with the `data_files` or `data_dir` parameter. These parameters can accept a relative path which resolves to the base path corresponding to where the dataset is loaded from. ```py >>> from datasets import load_dataset # load files that match the grep pattern >>> c4_subset = load_dataset("allenai/c4", data_files="en/c4-train.0000*-of-01024.json.gz") # load dataset from the en directory on the Hub >>> c4_subset = load_dataset("allenai/c4", data_dir="en") ``` The `split` parameter can also map a data file to a specific split: ```py >>> data_files = {"validation": "en/c4-validation.*.json.gz"} >>> c4_validation = load_dataset("allenai/c4", data_files=data_files, split="validation") ``` ## Local and remote files Datasets can be loaded from local files stored on your computer and from remote files. The datasets are most likely stored as a `csv`, `json`, `txt` or `parquet` file. The [`load_dataset`] function can load each of these file types. ### CSV 🤗 Datasets can read a dataset made up of one or several CSV files (in this case, pass your CSV files as a list): ```py >>> from datasets import load_dataset >>> dataset = load_dataset("csv", data_files="my_file.csv") ``` <Tip> For more details, check out the [how to load tabular datasets from CSV files](tabular_load#csv-files) guide. </Tip> ### JSON JSON files are loaded directly with [`load_dataset`] as shown below: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("json", data_files="my_file.json") ``` JSON files have diverse formats, but we think the most efficient format is to have multiple JSON objects; each line represents an individual row of data. For example: ```json {"a": 1, "b": 2.0, "c": "foo", "d": false} {"a": 4, "b": -5.5, "c": null, "d": true} ``` Another JSON format you may encounter is a nested field, in which case you'll need to specify the `field` argument as shown in the following: ```py {"version": "0.1.0", "data": [{"a": 1, "b": 2.0, "c": "foo", "d": false}, {"a": 4, "b": -5.5, "c": null, "d": true}] } >>> from datasets import load_dataset >>> dataset = load_dataset("json", data_files="my_file.json", field="data") ``` To load remote JSON files via HTTP, pass the URLs instead: ```py >>> base_url = "https://rajpurkar.github.io/SQuAD-explorer/dataset/" >>> dataset = load_dataset("json", data_files={"train": base_url + "train-v1.1.json", "validation": base_url + "dev-v1.1.json"}, field="data") ``` While these are the most common JSON formats, you'll see other datasets that are formatted differently. 🤗 Datasets recognizes these other formats and will fallback accordingly on the Python JSON loading methods to handle them. ### Parquet Parquet files are stored in a columnar format, unlike row-based files like a CSV. Large datasets may be stored in a Parquet file because it is more efficient and faster at returning your query. To load a Parquet file: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("parquet", data_files={'train': 'train.parquet', 'test': 'test.parquet'}) ``` To load remote Parquet files via HTTP, pass the URLs instead: ```py >>> base_url = "https://huggingface.co/datasets/wikimedia/wikipedia/resolve/main/20231101.ab/" >>> data_files = {"train": base_url + "train-00000-of-00001.parquet"} >>> wiki = load_dataset("parquet", data_files=data_files, split="train") ``` ### Arrow Arrow files are stored in an in-memory columnar format, unlike row-based formats like CSV and uncompressed formats like Parquet. To load an Arrow file: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("arrow", data_files={'train': 'train.arrow', 'test': 'test.arrow'}) ``` To load remote Arrow files via HTTP, pass the URLs instead: ```py >>> base_url = "https://huggingface.co/datasets/croissantllm/croissant_dataset/resolve/main/english_660B_11/" >>> data_files = {"train": base_url + "train/data-00000-of-00080.arrow"} >>> wiki = load_dataset("arrow", data_files=data_files, split="train") ``` Arrow is the file format used by 🤗 Datasets under the hood, therefore you can load a local Arrow file using [`Dataset.from_file`] directly: ```py >>> from datasets import Dataset >>> dataset = Dataset.from_file("data.arrow") ``` Unlike [`load_dataset`], [`Dataset.from_file`] memory maps the Arrow file without preparing the dataset in the cache, saving you disk space. The cache directory to store intermediate processing results will be the Arrow file directory in that case. For now only the Arrow streaming format is supported. The Arrow IPC file format (also known as Feather V2) is not supported. ### SQL Read database contents with [`~datasets.Dataset.from_sql`] by specifying the URI to connect to your database. You can read both table names and queries: ```py >>> from datasets import Dataset # load entire table >>> dataset = Dataset.from_sql("data_table_name", con="sqlite:///sqlite_file.db") # load from query >>> dataset = Dataset.from_sql("SELECT text FROM table WHERE length(text) > 100 LIMIT 10", con="sqlite:///sqlite_file.db") ``` <Tip> For more details, check out the [how to load tabular datasets from SQL databases](tabular_load#databases) guide. </Tip> ### WebDataset The [WebDataset](https://github.com/webdataset/webdataset) format is based on TAR archives and is suitable for big image datasets. Because of their size, WebDatasets are generally loaded in streaming mode (using `streaming=True`). You can load a WebDataset like this: ```python >>> from datasets import load_dataset >>> >>> path = "path/to/train/*.tar" >>> dataset = load_dataset("webdataset", data_files={"train": path}, split="train", streaming=True) ``` To load remote WebDatasets via HTTP, pass the URLs instead: ```python >>> from datasets import load_dataset >>> >>> base_url = "https://huggingface.co/datasets/lhoestq/small-publaynet-wds/resolve/main/publaynet-train-{i:06d}.tar" >>> urls = [base_url.format(i=i) for i in range(4)] >>> dataset = load_dataset("webdataset", data_files={"train": urls}, split="train", streaming=True) ``` ## Multiprocessing When a dataset is made of several files (that we call "shards"), it is possible to significantly speed up the dataset downloading and preparation step. You can choose how many processes you'd like to use to prepare a dataset in parallel using `num_proc`. In this case, each process is given a subset of shards to prepare: ```python from datasets import load_dataset imagenet = load_dataset("timm/imagenet-1k-wds", num_proc=8) ml_librispeech_spanish = load_dataset("facebook/multilingual_librispeech", "spanish", num_proc=8) ``` ## In-memory data 🤗 Datasets will also allow you to create a [`Dataset`] directly from in-memory data structures like Python dictionaries and Pandas DataFrames. ### Python dictionary Load Python dictionaries with [`~Dataset.from_dict`]: ```py >>> from datasets import Dataset >>> my_dict = {"a": [1, 2, 3]} >>> dataset = Dataset.from_dict(my_dict) ``` ### Python list of dictionaries Load a list of Python dictionaries with [`~Dataset.from_list`]: ```py >>> from datasets import Dataset >>> my_list = [{"a": 1}, {"a": 2}, {"a": 3}] >>> dataset = Dataset.from_list(my_list) ``` ### Python generator Create a dataset from a Python generator with [`~Dataset.from_generator`]: ```py >>> from datasets import Dataset >>> def my_gen(): ... for i in range(1, 4): ... yield {"a": i} ... >>> dataset = Dataset.from_generator(my_gen) ``` This approach supports loading data larger than available memory. You can also define a sharded dataset by passing lists to `gen_kwargs`: ```py >>> def gen(shards): ... for shard in shards: ... with open(shard) as f: ... for line in f: ... yield {"line": line} ... >>> shards = [f"data{i}.txt" for i in range(32)] >>> ds = IterableDataset.from_generator(gen, gen_kwargs={"shards": shards}) >>> ds = ds.shuffle(seed=42, buffer_size=10_000) # shuffles the shards order + uses a shuffle buffer >>> from torch.utils.data import DataLoader >>> dataloader = DataLoader(ds.with_format("torch"), num_workers=4) # give each worker a subset of 32/4=8 shards ``` ### Pandas DataFrame Load Pandas DataFrames with [`~Dataset.from_pandas`]: ```py >>> from datasets import Dataset >>> import pandas as pd >>> df = pd.DataFrame({"a": [1, 2, 3]}) >>> dataset = Dataset.from_pandas(df) ``` <Tip> For more details, check out the [how to load tabular datasets from Pandas DataFrames](tabular_load#pandas-dataframes) guide. </Tip> ## Offline Even if you don't have an internet connection, it is still possible to load a dataset. As long as you've downloaded a dataset from the Hub repository before, it should be cached. This means you can reload the dataset from the cache and use it offline. If you know you won't have internet access, you can run 🤗 Datasets in full offline mode. This saves time because instead of waiting for the Dataset builder download to time out, 🤗 Datasets will look directly in the cache. Set the environment variable `HF_HUB_OFFLINE` to `1` to enable full offline mode. ## Slice splits You can also choose only to load specific slices of a split. There are two options for slicing a split: using strings or the [`ReadInstruction`] API. Strings are more compact and readable for simple cases, while [`ReadInstruction`] is easier to use with variable slicing parameters. Concatenate a `train` and `test` split by: ```py >>> train_test_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split="train+test") ===STRINGAPI-READINSTRUCTION-SPLIT=== >>> ri = datasets.ReadInstruction("train") + datasets.ReadInstruction("test") >>> train_test_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split=ri) ``` Select specific rows of the `train` split: ```py >>> train_10_20_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split="train[10:20]") ===STRINGAPI-READINSTRUCTION-SPLIT=== >>> train_10_20_ds = datasets.load_dataset("bookcorpu", split=datasets.ReadInstruction("train", from_=10, to=20, unit="abs")) ``` Or select a percentage of a split with: ```py >>> train_10pct_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split="train[:10%]") ===STRINGAPI-READINSTRUCTION-SPLIT=== >>> train_10_20_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split=datasets.ReadInstruction("train", to=10, unit="%")) ``` Select a combination of percentages from each split: ```py >>> train_10_80pct_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split="train[:10%]+train[-80%:]") ===STRINGAPI-READINSTRUCTION-SPLIT=== >>> ri = (datasets.ReadInstruction("train", to=10, unit="%") + datasets.ReadInstruction("train", from_=-80, unit="%")) >>> train_10_80pct_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split=ri) ``` Finally, you can even create cross-validated splits. The example below creates 10-fold cross-validated splits. Each validation dataset is a 10% chunk, and the training dataset makes up the remaining complementary 90% chunk: ```py >>> val_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split=[f"train[{k}%:{k+10}%]" for k in range(0, 100, 10)]) >>> train_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split=[f"train[:{k}%]+train[{k+10}%:]" for k in range(0, 100, 10)]) ===STRINGAPI-READINSTRUCTION-SPLIT=== >>> val_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", [datasets.ReadInstruction("train", from_=k, to=k+10, unit="%") for k in range(0, 100, 10)]) >>> train_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", [(datasets.ReadInstruction("train", to=k, unit="%") + datasets.ReadInstruction("train", from_=k+10, unit="%")) for k in range(0, 100, 10)]) ``` ### Percent slicing and rounding The default behavior is to round the boundaries to the nearest integer for datasets where the requested slice boundaries do not divide evenly by 100. As shown below, some slices may contain more examples than others. For instance, if the following train split includes 999 records, then: ```py # 19 records, from 500 (included) to 519 (excluded). >>> train_50_52_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split="train[50%:52%]") # 20 records, from 519 (included) to 539 (excluded). >>> train_52_54_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split="train[52%:54%]") ``` If you want equal sized splits, use `pct1_dropremainder` rounding instead. This treats the specified percentage boundaries as multiples of 1%. ```py # 18 records, from 450 (included) to 468 (excluded). >>> train_50_52pct1_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split=datasets.ReadInstruction("train", from_=50, to=52, unit="%", rounding="pct1_dropremainder")) # 18 records, from 468 (included) to 486 (excluded). >>> train_52_54pct1_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split=datasets.ReadInstruction("train",from_=52, to=54, unit="%", rounding="pct1_dropremainder")) # Or equivalently: >>> train_50_52pct1_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split="train[50%:52%](pct1_dropremainder)") >>> train_52_54pct1_ds = datasets.load_dataset("ajibawa-2023/General-Stories-Collection", split="train[52%:54%](pct1_dropremainder)") ``` <Tip warning={true}> `pct1_dropremainder` rounding may truncate the last examples in a dataset if the number of examples in your dataset don't divide evenly by 100. </Tip> <a id='troubleshoot'></a> ## Troubleshooting Sometimes, you may get unexpected results when you load a dataset. Two of the most common issues you may encounter are manually downloading a dataset and specifying features of a dataset. ### Specify features When you create a dataset from local files, the [`Features`] are automatically inferred by [Apache Arrow](https://arrow.apache.org/docs/). However, the dataset's features may not always align with your expectations, or you may want to define the features yourself. The following example shows how you can add custom labels with the [`ClassLabel`] feature. Start by defining your own labels with the [`Features`] class: ```py >>> class_names = ["sadness", "joy", "love", "anger", "fear", "surprise"] >>> emotion_features = Features({'text': Value('string'), 'label': ClassLabel(names=class_names)}) ``` Next, specify the `features` parameter in [`load_dataset`] with the features you just created: ```py >>> dataset = load_dataset('csv', data_files=file_dict, delimiter=';', column_names=['text', 'label'], features=emotion_features) ``` Now when you look at your dataset features, you can see it uses the custom labels you defined: ```py >>> dataset['train'].features {'text': Value('string'), 'label': ClassLabel(names=['sadness', 'joy', 'love', 'anger', 'fear', 'surprise'])} ```
datasets/docs/source/loading.mdx/0
{ "file_path": "datasets/docs/source/loading.mdx", "repo_id": "datasets", "token_count": 5694 }
89
# Troubleshooting This guide aims to provide you the tools and knowledge required to navigate some common issues. If the suggestions listed in this guide do not cover your such situation, please refer to the [Asking for Help](#asking-for-help) section to learn where to find help with your specific issue. ## Issues when uploading datasets with `push_to_hub` ### Authentication issues If you are experiencing authentication issues when sharing a dataset on 🤗 Hub using [`Dataset.push_to_hub`] and a Hugging Face access token: * Make sure that the Hugging Face token you're using to authenticate yourself is a token with **write** permission. * On OSX, it may help to clean up all the huggingface.co passwords on your keychain access, as well as reconfigure `git config --global credential.helper osxkeychain`, before using `huggingface-cli login`. Alternatively, you can use SSH keys to authenticate yourself - read more in the [🤗 Hub documentation](https://huggingface.co/docs/hub/security-git-ssh). ### Lost connection on large dataset upload When uploading large datasets to Hub, if the number of dataset shards is large, it can create too many commits for the Hub in a short period. This will result in a connection error. The connection error can also be caused by a HTTP 500 error returned by AWS S3 bucket that Hub uses internally. In either situation, you can re-run [`Dataset.push_to_hub`] to proceed with the dataset upload. Hub will check the SHAs of already uploaded shards to avoid reuploading them. We are working on making upload process more robust to transient errors, so updating to the latest library version is always a good idea. ### `Too Many Requests` Uploading large datasets via `push_to_hub()` can result in an error: ```bash HfHubHTTPError: 429 Client Error: Too Many Requests for url: ... You have exceeded our hourly quotas for action: commit. We invite you to retry later. ``` If you encounter this issue, you need to upgrade the `datasets` library to the latest version (or at least `2.15.0`). ## Issues when creating datasets from custom data ### Loading images and audio from a folder When creating a dataset from a folder, one of the most common issues is that the file structure does not follow the expected format, or there's an issue with the metadata file. Learn more about required folder structure in corresponding documentation pages: * [AudioFolder](https://huggingface.co/docs/datasets/audio_dataset#audiofolder) * [ImageFolder](https://huggingface.co/docs/datasets/image_dataset#imagefolder) ### Pickling issues #### Pickling issues when using `Dataset.from_generator` When creating a dataset, [`IterableDataset.from_generator`] and [`Dataset.from_generator`] expect a "picklable" generator function. This is required to hash the function using [`pickle`](https://docs.python.org/3/library/pickle.html) to be able to cache the dataset on disk. While generator functions are generally "picklable", note that generator objects are not. So if you're using a generator object, you will encounter a `TypeError` like this: ```bash TypeError: cannot pickle 'generator' object ``` This error can also occur when using a generator function that uses a global object that is not "picklable", such as a DB connection, for example. If that's the case, you can initialize such object directly inside the generator function to avoid this error. #### Pickling issues with `Dataset.map` Pickling errors can also happen in the multiprocess [`Dataset.map`] - objects are pickled to be passed to child processes. If the objects used in the transformation are not picklable, it's not possible to cache the result of `map`, which leads to an error being raised. Here are some ways to address this issue: * A universal solution to pickle issues is to make sure the objects (or generator classes) are pickable manually by implementing `__getstate__` / `__setstate__` / `__reduce__`. * You can also provide your own unique hash in `map` with the `new_fingerprint` argument. * You can also disable caching by calling `datasets.disable_caching()`, however, this is undesirable - [read more about importance of cache](cache) ## Asking for help If the above troubleshooting advice did not help you resolve your issue, reach out for help to the community and the team. ### Forums Ask for help on the Hugging Face forums - post your question in the [🤗Datasets category](https://discuss.huggingface.co/c/datasets/10) Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved! ### Discord Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you. ### Community Discussions on 🤗 Hub If you are facing issues creating a custom dataset on Hub, you can ask the Hugging Face team for help by opening a discussion in the Community tab of your dataset with this message: ```text # Dataset rewiew request for <Dataset name> ## Description <brief description of the dataset> ## Files to review - file1 - file2 - ... cc @lhoestq @albertvillanova ``` ### GitHub Issues Finally, if you suspect to have found a bug related to the library itself, create an Issue on the 🤗 Datasets [GitHub repository](https://github.com/huggingface/datasets/issues). Include context regarding the bug: code snippet to reproduce, details about your environment and data, etc. to help us figure out what's wrong and how we can fix it.
datasets/docs/source/troubleshoot.mdx/0
{ "file_path": "datasets/docs/source/troubleshoot.mdx", "repo_id": "datasets", "token_count": 1455 }
90
# Lint as: python3 """HuggingFace/Datasets is an open library of datasets. Note: VERSION needs to be formatted following the MAJOR.MINOR.PATCH convention Simple check list for release from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py Steps to make a release: 0. Prerequisites: - Dependencies: - twine: `pip install twine` - Create an account in (and join the 'datasets' project): - PyPI: https://pypi.org/ - Test PyPI: https://test.pypi.org/ - Don't break `transformers`: run the `transformers` CI using the `main` branch and make sure it's green. - In `transformers`, use `datasets @ git+https://github.com/huggingface/datasets@main#egg=datasets` Add a step to install `datasets@main` after `save_cache` in .circleci/create_circleci_config.py: ``` {"run": {"name": "Install `datasets@main`", "command": 'pip uninstall datasets -y && pip install "datasets @ git+https://github.com/huggingface/datasets@main#egg=datasets"'}} ``` - and then run the CI 1. Create the release branch from main branch: ``` git checkout main git pull upstream main git checkout -b release-VERSION ``` 2. Change the version to the release VERSION in: - __init__.py - setup.py 3. Commit these changes, push and create a Pull Request: ``` git add -u git commit -m "Release: VERSION" git push upstream release-VERSION ``` - Go to: https://github.com/huggingface/datasets/pull/new/release-VERSION - Create pull request 4. From your local release branch, build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). - First, delete any building directories that may exist from previous builds: - build - dist - From the top level directory, build the wheel and the sources: ``` python setup.py bdist_wheel python setup.py sdist ``` - You should now have a /dist directory with both .whl and .tar.gz source versions. 5. Check that everything looks correct by uploading the package to the test PyPI server: ``` twine upload dist/* -r testpypi ``` Check that you can install it in a virtualenv/notebook by running: ``` !pip install -U --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ datasets ``` 6. Upload the final version to the actual PyPI: ``` twine upload dist/* -r pypi ``` 7. Make the release on GitHub once everything is looking hunky-dory: - Merge the release Pull Request - Create a new release: https://github.com/huggingface/datasets/releases/new - Choose a tag: Introduce the new VERSION as tag, that will be created when you publish the release - Create new tag VERSION on publish - Release title: Introduce the new VERSION as well - Describe the release - Use "Generate release notes" button for automatic generation - Publish release 8. Set the dev version - Create the dev-version branch from the main branch: ``` git checkout main git pull upstream main git branch -D dev-version git checkout -b dev-version ``` - Change the version to X.X.X+1.dev0 (e.g. VERSION=1.18.3 -> 1.18.4.dev0) in: - __init__.py - setup.py - Commit these changes, push and create a Pull Request: ``` git add -u git commit -m "Set dev version" git push upstream dev-version ``` - Go to: https://github.com/huggingface/datasets/pull/new/dev-version - Create pull request - Merge the dev version Pull Request """ from setuptools import find_packages, setup REQUIRED_PKGS = [ # For file locking "filelock", # We use numpy>=1.17 to have np.random.Generator (Dataset shuffling) "numpy>=1.17", # Backend and serialization. # Minimum 15.0.0 to be able to cast dictionary types to their underlying types "pyarrow>=15.0.0", # For smart caching dataset processing "dill>=0.3.0,<0.3.9", # tmp pin until dill has official support for determinism see https://github.com/uqfoundation/dill/issues/19 # For performance gains with apache arrow "pandas", # for downloading datasets over HTTPS "requests>=2.32.2", # progress bars in downloads and data operations "tqdm>=4.66.3", # for fast hashing "xxhash", # for better multiprocessing "multiprocess<0.70.17", # to align with dill<0.3.9 (see above) # to save datasets locally or on any filesystem # minimum 2023.1.0 to support protocol=kwargs in fsspec's `open`, `get_fs_token_paths`, etc.: see https://github.com/fsspec/filesystem_spec/pull/1143 "fsspec[http]>=2023.1.0,<=2025.7.0", # To get datasets from the Datasets Hub on huggingface.co "huggingface-hub>=0.24.0", # Utilities from PyPA to e.g., compare versions "packaging", # To parse YAML metadata from dataset cards "pyyaml>=5.1", ] AUDIO_REQUIRE = [ "soundfile>=0.12.1", "torchcodec>=0.4.0", "torch>=2.7.0", ] VISION_REQUIRE = [ "Pillow>=9.4.0", # When PIL.Image.ExifTags was introduced ] BENCHMARKS_REQUIRE = [ "tensorflow==2.12.0", "torch==2.0.1", "transformers==4.30.1", ] TESTS_REQUIRE = [ # fix pip install issues for windows "numba>=0.56.4", # to get recent versions of llvmlite for windows ci # test dependencies "absl-py", "decorator", "joblib<1.3.0", # joblibspark doesn't support recent joblib versions "joblibspark", "pytest", "pytest-datadir", "pytest-xdist", # optional dependencies "aiohttp", "elasticsearch>=7.17.12,<8.0.0", # 8.0 asks users to provide hosts or cloud_id when instantiating ElasticSearch(); 7.9.1 has legacy numpy.float_ which was fixed in https://github.com/elastic/elasticsearch-py/pull/2551. "faiss-cpu>=1.8.0.post1", # Pins numpy < 2 "h5py", "jax>=0.3.14; sys_platform != 'win32'", "jaxlib>=0.3.14; sys_platform != 'win32'", "lz4", "moto[server]", "pyspark>=3.4", # https://issues.apache.org/jira/browse/SPARK-40991 fixed in 3.4.0 "py7zr", "rarfile>=4.0", "sqlalchemy", "protobuf<4.0.0", # 4.0.0 breaks compatibility with tensorflow<2.12 "tensorflow>=2.6.0; python_version<'3.10' and sys_platform != 'win32'", # numpy-2 is not supported for Python < 3.10 "tensorflow>=2.16.0; python_version>='3.10' and sys_platform != 'win32'", # Pins numpy < 2 "tiktoken", "torch>=2.0.0", "torchdata", "soundfile>=0.12.1", "transformers>=4.42.0", # Pins numpy < 2 "zstandard", "polars[timezone]>=0.20.0", "Pillow>=9.4.0", # When PIL.Image.ExifTags was introduced "soundfile>=0.12.1", "torchcodec>=0.4.0; sys_platform != 'win32'", # not available for windows ] NUMPY2_INCOMPATIBLE_LIBRARIES = [ "faiss-cpu", "tensorflow", ] TESTS_NUMPY2_REQUIRE = [ library for library in TESTS_REQUIRE if library.partition(">")[0] not in NUMPY2_INCOMPATIBLE_LIBRARIES ] QUALITY_REQUIRE = ["ruff>=0.3.0"] DOCS_REQUIRE = [ # Following dependencies are required for the Python reference to be built properly "transformers", "torch", "tensorflow>=2.6.0", ] PDFS_REQUIRE = ["pdfplumber>=0.11.4"] EXTRAS_REQUIRE = { "audio": AUDIO_REQUIRE, "vision": VISION_REQUIRE, "tensorflow": [ "tensorflow>=2.6.0", ], "tensorflow_gpu": ["tensorflow>=2.6.0"], "torch": ["torch"], "jax": ["jax>=0.3.14", "jaxlib>=0.3.14"], "streaming": [], # for backward compatibility "dev": TESTS_REQUIRE + QUALITY_REQUIRE + DOCS_REQUIRE, "tests": TESTS_REQUIRE, "tests_numpy2": TESTS_NUMPY2_REQUIRE, "quality": QUALITY_REQUIRE, "benchmarks": BENCHMARKS_REQUIRE, "docs": DOCS_REQUIRE, "pdfs": PDFS_REQUIRE, } setup( name="datasets", version="4.0.1.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots) description="HuggingFace community-driven open-source library of datasets", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", author="HuggingFace Inc.", author_email="thomas@huggingface.co", url="https://github.com/huggingface/datasets", download_url="https://github.com/huggingface/datasets/tags", license="Apache 2.0", package_dir={"": "src"}, packages=find_packages("src"), package_data={ "datasets": ["py.typed"], "datasets.utils.resources": ["*.json", "*.yaml", "*.tsv"], }, entry_points={"console_scripts": ["datasets-cli=datasets.commands.datasets_cli:main"]}, python_requires=">=3.9.0", install_requires=REQUIRED_PKGS, extras_require=EXTRAS_REQUIRE, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], keywords="datasets machine learning datasets", zip_safe=False, # Required for mypy to find the py.typed file )
datasets/setup.py/0
{ "file_path": "datasets/setup.py", "repo_id": "datasets", "token_count": 3826 }
91
__all__ = [ "DownloadConfig", "DownloadManager", "DownloadMode", "StreamingDownloadManager", ] from .download_config import DownloadConfig from .download_manager import DownloadManager, DownloadMode from .streaming_download_manager import StreamingDownloadManager
datasets/src/datasets/download/__init__.py/0
{ "file_path": "datasets/src/datasets/download/__init__.py", "repo_id": "datasets", "token_count": 77 }
92