# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import sys
import warnings
from dataclasses import dataclass, field
from typing import Literal, Optional

import numpy as np
import tyro
from typing_extensions import Annotated

from trl.trainer.utils import exact_div

from ..core import flatten_dict
from ..import_utils import is_wandb_available


JSONDict = Annotated[Optional[dict], tyro.conf.arg(metavar="JSON", constructor=json.loads)]


@dataclass
class PPOConfig:
    """
    Configuration class for PPOTrainer
    """

    # common parameters
    exp_name: str = os.path.basename(sys.argv[0])[: -len(".py")]
    """the name of this experiment (by default is the file name without the extension name)"""
    seed: int = 0
    """Seed value for random generations"""
    log_with: Optional[Literal["wandb", "tensorboard"]] = None
    """Log with either 'wandb' or 'tensorboard', check https://huggingface.co/docs/accelerate/usage_guides/tracking for more details"""
    task_name: Optional[str] = None
    """Name of task to use - used only for tracking purposes"""
    model_name: Optional[str] = "gpt2"
    """Name of model to use - used only for tracking purposes"""
    query_dataset: Optional[str] = "imdb"
    """Name of dataset to query - used only for tracking purposes"""
    reward_model: Optional[str] = "sentiment-analysis:lvwerra/distilbert-imdb"
    """The reward model to use - used only for tracking purposes"""
    remove_unused_columns: bool = True
    """Remove unused columns from the dataset if `datasets.Dataset` is used"""
    tracker_kwargs: JSONDict = field(default_factory=dict)
    """Keyword arguments for the tracker (e.g. python ppo.py --tracker_kwargs='{"wandb": {"entity": "my_wandb_entity", "name": "my_exp_name"}}')"""
    accelerator_kwargs: JSONDict = field(default_factory=dict)
    """Keyword arguments for the accelerator"""
    project_kwargs: JSONDict = field(default_factory=dict)
    """Keyword arguments for the accelerator project config (e.g. `logging_dir`)"""
    tracker_project_name: str = "trl"
    """Name of project to use for tracking"""
    push_to_hub_if_best_kwargs: JSONDict = field(default_factory=dict)
    """Keyword arguments for pushing the model to the hub during training (e.g. repo_id)"""

    # hyperparameters
    steps: int = 20000
    """Number of training steps"""
    learning_rate: float = 1.41e-5
    """Adam learning rate"""
    adap_kl_ctrl: bool = True
    """Use adaptive KL control, otherwise linear"""
    init_kl_coef: Optional[float] = 0.2
    """Initial KL penalty coefficient (used for adaptive and linear control)"""
    kl_penalty: Literal["kl", "abs", "mse", "full"] = "kl"
    """kl penalty options: 'kl': model_logp - ref_logp, 'abs': abs(kl), 'mse': mean squared error mse(kl) and 'full': the actual kl for all tokens in the distribution"""
    target: Optional[float] = 6
    """Target KL value for adaptive KL control"""
    horizon: Optional[float] = 10000
    """Horizon for adaptive KL control"""
    gamma: float = 1
    """Gamma parameter for advantage calculation"""
    lam: float = 0.95
    """Lambda parameter for advantage calculation"""
    cliprange: float = 0.2
    """Range for clipping in PPO policy gradient loss"""
    cliprange_value: float = 0.2
    """Range for clipping values in loss calculation"""
    vf_coef: float = 0.1
    """Scaling factor for value loss"""
    batch_size: int = 128
    """Number of samples per optimisation step"""
    forward_batch_size: Optional[int] = None
    """DEPRECATED: use `mini_batch_size` instead, which does the same thing."""
    mini_batch_size: int = 128
    """Number of samples optimized in each mini batch"""
    gradient_accumulation_steps: int = 1
    """The number of gradient accumulation steps"""
    world_size: tyro.conf.Suppress[int] = None
    """The world size for distributed training"""
    ppo_epochs: int = 4
    """Number of optimisation epochs per batch of samples"""
    max_grad_norm: Optional[float] = None
    """Maximum gradient norm for gradient clipping"""
    optimize_cuda_cache: Optional[bool] = None
    """DEPRECATED: use `optimize_device_cache` instead, which does the same thing."""
    optimize_device_cache: Optional[bool] = False
    """Optimize device cache for slightly more memory-efficient training"""
    early_stopping: bool = False
    """Whether to stop the PPO optimization loop early if the KL is too high"""
    target_kl: float = 1
    """Stop early if we exceed this value by over 50%"""
    compare_steps: int = 1
    """Number of steps between comparison of the current reward with the best seen so far"""
    ratio_threshold: float = 10.0
    """Skip mini-batches with high PPO ratios that can cause loss spikes"""
    use_score_scaling: bool = False
    """Use score scaling"""
    use_score_norm: bool = False
    """Use score normalization. Only applicable if use_score_scaling is True"""
    score_clip: Optional[float] = None
    """Score clipping"""
    whiten_rewards: bool = False
    """Whiten the rewards before computing advantages"""
    gradient_checkpointing: bool = False
    """Enable gradient checkpointing"""

    # computed hyperparameters at runtime; we use `tyro.conf.Suppress` to hide them from the help text
    is_encoder_decoder: Optional[tyro.conf.Suppress[bool]] = None
    """TO BE FILLED AT RUNTIME: Whether the model is an encoder-decoder model"""
    is_peft_model: Optional[tyro.conf.Suppress[bool]] = None
    """TO BE FILLED AT RUNTIME: Whether the model is a PEFT model"""
    backward_batch_size: tyro.conf.Suppress[int] = None
    """TO BE FILLED AT RUNTIME: Number of samples optimized in an `optimizer.step()` call"""
    global_backward_batch_size: tyro.conf.Suppress[int] = None
    """TO BE FILLED AT RUNTIME: the effective `backward_batch_size` across all processes"""
    global_batch_size: tyro.conf.Suppress[int] = None
    """TO BE FILLED AT RUNTIME: the effective `batch_size` across all processes"""
    dataset_num_proc: Optional[int] = None

    def __post_init__(self):
        # resolve the deprecated aliases before they are used below
        if self.optimize_cuda_cache is not None:
            warnings.warn(
                "The `optimize_cuda_cache` argument will be deprecated soon, please use `optimize_device_cache` instead."
            )
            if self.optimize_device_cache is True:
                raise ValueError("Both `optimize_device_cache` and `optimize_cuda_cache` were provided")
            self.optimize_device_cache = self.optimize_cuda_cache

        if self.forward_batch_size is not None:
            warnings.warn(
                "Note that using `forward_batch_size` is deprecated, use `mini_batch_size` instead. By setting it you overwrite `mini_batch_size` which affects both the batch size during forward passes and also the mini batch size for PPO optimization."
            )
            self.mini_batch_size = self.forward_batch_size

        self.backward_batch_size = self.mini_batch_size * self.gradient_accumulation_steps
        exact_div(
            self.batch_size,
            self.backward_batch_size,
            "`batch_size` must be a multiple of `mini_batch_size * gradient_accumulation_steps`",
        )

        # check if wandb is installed
        if self.log_with == "wandb":
            # raise error if wandb is not installed
            if not is_wandb_available():
                raise ImportError(
                    "Please install wandb to use wandb logging. You can do this by running `pip install wandb`."
                )

        self.total_ppo_epochs = int(np.ceil(self.steps / self.batch_size))
        assert self.kl_penalty in ["kl", "abs", "mse", "full"]

    def to_dict(self):
        output_dict = {}
        for key, value in self.__dict__.items():
            output_dict[key] = value
        return flatten_dict(output_dict)
trl/trl/trainer/ppo_config.py
{ "file_path": "trl/trl/trainer/ppo_config.py", "repo_id": "trl", "token_count": 2874 }
451
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# Troubleshoot

This guide provides solutions to some issues you might encounter when using Accelerate. Not all errors are covered because Accelerate is an active library that is continuously evolving and there are many different use cases and distributed training setups. If the solutions described here don't help with your specific error, please take a look at the [Ask for help](#ask-for-help) section to learn where and how to get help.

## Logging

Logging can help you identify where an error is coming from. In a distributed setup with multiple processes, logging can be a challenge, but Accelerate provides the [`~accelerate.logging`] utility to ensure logs are synchronized.

To troubleshoot an issue, use [`~accelerate.logging`] instead of the standard Python [`logging`](https://docs.python.org/3/library/logging.html#module-logging) module. Set the verbosity level (`INFO`, `DEBUG`, `WARNING`, `ERROR`, `CRITICAL`) with the `log_level` parameter, and then you can either:

1. Export the `log_level` as the `ACCELERATE_LOG_LEVEL` environment variable.
2. Pass the `log_level` directly to `get_logger`.

For example, to set `log_level="INFO"`:

```py
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")
```

By default, the log is called on main processes only. To call it on all processes, pass `main_process_only=False`. If a log should be called on all processes and in order, also pass `in_order=True`.

```py
from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="DEBUG")
# log all processes
logger.debug("thing_to_log", main_process_only=False)
# log all processes in order
logger.debug("thing_to_log", main_process_only=False, in_order=True)
```

## Hanging code and timeout errors

There can be many reasons why your code is hanging. Let's take a look at how to solve some of the most common issues that can cause your code to hang.

### Mismatched tensor shapes

Mismatched tensor shapes are a common issue that can cause your code to hang for a significant amount of time on a distributed setup.

When running scripts in a distributed setup, functions such as [`Accelerator.gather`] and [`Accelerator.reduce`] are necessary to grab tensors across devices to collectively perform operations on them. These (and other) functions rely on `torch.distributed` to perform a `gather` operation, which requires tensors to have the **exact same shape** across all processes. When the tensor shapes don't match, your code hangs and you'll eventually hit a timeout exception.

You can use Accelerate's operational debug mode to immediately catch this issue. We recommend enabling this mode during the `accelerate config` setup, but you can also enable it from the CLI, as an environment variable, or by manually editing the `config.yaml` file.
<hfoptions id="mismatch"> <hfoption id="CLI"> ```bash accelerate launch --debug {my_script.py} --arg1 --arg2 ``` </hfoption> <hfoption id="environment variable"> If enabling debug mode as an environment variable, you don't need to call `accelerate launch`. ```bash ACCELERATE_DEBUG_MODE="1" torchrun {my_script.py} --arg1 --arg2 ``` </hfoption> <hfoption id="config.yaml"> Add `debug: true` to your `config.yaml` file. ```yaml compute_environment: LOCAL_MACHINE debug: true ``` </hfoption> </hfoptions> Once you enable debug mode, you should get a traceback that points to the tensor shape mismatch issue. ```py Traceback (most recent call last): File "/home/zach_mueller_huggingface_co/test.py", line 18, in <module> main() File "/home/zach_mueller_huggingface_co/test.py", line 15, in main broadcast_tensor = broadcast(tensor) File "/home/zach_mueller_huggingface_co/accelerate/src/accelerate/utils/operations.py", line 303, in wrapper accelerate.utils.operations.DistributedOperationException: Cannot apply desired operation due to shape mismatches. All shapes across devices must be valid. Operation: `accelerate.utils.operations.broadcast` Input shapes: - Process 0: [1, 5] - Process 1: [1, 2, 5] ``` ### Early stopping For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1 which will cause your code to hang indefinitely until a timeout occurs. If you have early stopping conditionals, use the `set_breakpoint` and `check_breakpoint` methods to make sure all the processes are ended correctly. ```py # Assume `should_do_breakpoint` is a custom defined function that returns a conditional, # and that conditional might be true only on process 1 if should_do_breakpoint(loss): accelerator.set_breakpoint() # Later in the training script when we need to check for the breakpoint if accelerator.check_breakpoint(): break ``` ### Low kernel versions on Linux On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version. ### MPI If your distributed CPU training job using MPI is hanging, ensure that you have [passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) setup (using keys) between the nodes. This means that for all nodes in your hostfile, you should to be able to SSH from one node to another without being prompted for a password. Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the hostnames for each of the nodes. ```bash mpirun -f hostfile -n {number of nodes} -ppn 1 hostname ``` ## CUDA Out-of-Memory One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory". The entire script needs to be restarted and any progress is lost. To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma). This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds. To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code. 
### Early stopping

For early stopping in distributed training, if each process has a specific stopping condition (e.g. validation loss), it may not be synchronized across all processes. As a result, a break can happen on process 0 but not on process 1, which will cause your code to hang indefinitely until a timeout occurs.

If you have early stopping conditionals, use the `set_breakpoint` and `check_breakpoint` methods to make sure all the processes are ended correctly.

```py
# Assume `should_do_breakpoint` is a custom defined function that returns a conditional,
# and that conditional might be true only on process 1
if should_do_breakpoint(loss):
    accelerator.set_breakpoint()

# Later in the training script when we need to check for the breakpoint
if accelerator.check_breakpoint():
    break
```

### Low kernel versions on Linux

On Linux with kernel version < 5.5, hanging processes have been reported. To avoid this problem, upgrade your system to a later kernel version.

### MPI

If your distributed CPU training job using MPI is hanging, ensure that you have [passwordless SSH](https://www.open-mpi.org/faq/?category=rsh#ssh-keys) setup (using keys) between the nodes. This means that for all nodes in your hostfile, you should be able to SSH from one node to another without being prompted for a password.

Next, try to run the `mpirun` command as a sanity check. For example, the command below should print out the hostnames for each of the nodes.

```bash
mpirun -f hostfile -n {number of nodes} -ppn 1 hostname
```

## CUDA Out-of-Memory

One of the most frustrating errors when it comes to running training scripts is hitting "CUDA Out-of-Memory". The entire script needs to be restarted and any progress is lost.

To address this problem, Accelerate provides the [`find_executable_batch_size`] utility that is heavily based on [toma](https://github.com/BlackHC/toma). This utility retries code that fails due to OOM (out-of-memory) conditions and automatically lowers batch sizes. For each OOM condition, the algorithm decreases the batch size by half and retries the code until it succeeds.

To use [`find_executable_batch_size`], restructure your training function to include an inner function with `find_executable_batch_size` and build your dataloaders inside it. At a minimum, this only takes 4 new lines of code.

<Tip warning={true}>

The inner function **must** take the batch size as the first parameter, but we do not pass one to it when called. The wrapper handles this for you. Any object (models, optimizers) that consumes CUDA memory and is passed to the [`Accelerator`] also **must** be declared inside the inner function.

</Tip>

```diff
def training_function(args):
    accelerator = Accelerator()

+   @find_executable_batch_size(starting_batch_size=args.batch_size)
+   def inner_training_loop(batch_size):
+       nonlocal accelerator # Ensure they can be used in our context
+       accelerator.free_memory() # Free all lingering references
        model = get_model()
        model.to(accelerator.device)
        optimizer = get_optimizer()
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
        lr_scheduler = get_scheduler(
            optimizer,
            num_training_steps=len(train_dataloader)*num_epochs
        )
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        train(model, optimizer, train_dataloader, lr_scheduler)
        validate(model, eval_dataloader)
+   inner_training_loop()
```

## Non-reproducible results between device setups

If you changed the device setup and observe different model performance, it is likely you didn't update your script when moving from one setup to another. Even if you're using the same script with the same batch size, the results will still be different on a TPU, multi-GPU, and single GPU.

For example, if you were training on a single GPU with a batch size of 16 and you move to a dual GPU setup, you need to change the batch size to 8 to have the same effective batch size. This is because when training with Accelerate, the batch size passed to the dataloader is the **batch size per GPU**.

To make sure you can reproduce the results between the setups, make sure to use the same seed, adjust the batch size accordingly, and consider scaling the learning rate.

For more details and a quick reference for batch sizes, check out the [Comparing performance between different device setups](../concept_guides/performance) guide.
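
As a quick sketch of the arithmetic (numbers hypothetical): the dataloader batch size times the number of processes gives the effective batch size, so the per-device value has to shrink as processes are added.

```py
from accelerate import Accelerator

accelerator = Accelerator()
target_effective_batch_size = 16
# the batch size passed to the dataloader is *per device*,
# so divide the target by the number of processes
per_device_batch_size = target_effective_batch_size // accelerator.num_processes
```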
## Performance issues on different GPUs

If your multi-GPU setup consists of different GPUs, you may encounter some performance issues:

- There may be an imbalance in GPU memory between the GPUs. In this case, the GPU with the smaller memory will limit the batch size or the size of the model that can be loaded onto the GPUs.
- If you are using GPUs with different performance profiles, the performance will be driven by the slowest GPU you are using because the other GPUs will have to wait for it to complete its workload.

Vastly different GPUs within the same setup can lead to performance bottlenecks.

## Ask for help

If none of the solutions and advice here helped resolve your issue, you can always reach out to the community and Accelerate team for help.

- Ask for help on the Hugging Face forums by posting your question in the [🤗 Accelerate category](https://discuss.huggingface.co/c/accelerate/18). Make sure to write a descriptive post with relevant context about your setup and reproducible code to maximize the likelihood that your problem is solved!
- Post a question on [Discord](http://hf.co/join/discord), and let the team and the community help you.
- Create an Issue on the 🤗 Accelerate [GitHub repository](https://github.com/huggingface/accelerate/issues) if you think you've found a bug related to the library. Include context regarding the bug and details about your distributed setup to help us better figure out what's wrong and how we can fix it.
accelerate/docs/source/basic_tutorials/troubleshooting.md
{ "file_path": "accelerate/docs/source/basic_tutorials/troubleshooting.md", "repo_id": "accelerate", "token_count": 3049 }
0
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# DeepSpeed

[DeepSpeed](https://github.com/microsoft/DeepSpeed) implements everything described in the [ZeRO paper](https://arxiv.org/abs/1910.02054). Some of the salient optimizations are:

1. Optimizer state partitioning (ZeRO stage 1)
2. Gradient partitioning (ZeRO stage 2)
3. Parameter partitioning (ZeRO stage 3)
4. Custom mixed precision training handling
5. A range of fast CUDA-extension-based optimizers
6. ZeRO-Offload to CPU and Disk/NVMe
7. Hierarchical partitioning of model parameters (ZeRO++)

ZeRO-Offload has its own dedicated paper: [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840). And NVMe support is described in the paper [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857).

DeepSpeed ZeRO-2 is primarily used only for training, as its features are of no use to inference.

DeepSpeed ZeRO-3 can be used for inference as well since it allows huge models to be loaded on multiple GPUs, which won't be possible on a single GPU.

🤗 Accelerate integrates [DeepSpeed](https://github.com/microsoft/DeepSpeed) via 2 options:

1. Integration of the DeepSpeed features via a `deepspeed config file` specification in `accelerate config`. You just supply your custom config file or use our template. Most of this document is focused on this feature. This supports all the core features of DeepSpeed and gives the user a lot of flexibility. The user may have to change a few lines of code depending on the config.
2. Integration via `deepspeed_plugin`. This supports a subset of the DeepSpeed features and uses default options for the rest of the configurations. The user need not change any code, and it is good for those who are fine with most of the default settings of DeepSpeed.
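
For the second option, a minimal sketch of what the plugin route looks like in code (defaults beyond the two arguments shown are filled in by Accelerate; treat this as illustrative rather than a complete recipe):

```python
from accelerate import Accelerator
from accelerate.utils import DeepSpeedPlugin

# ZeRO stage 2 with DeepSpeed defaults for everything else
deepspeed_plugin = DeepSpeedPlugin(zero_stage=2, gradient_accumulation_steps=1)
accelerator = Accelerator(mixed_precision="fp16", deepspeed_plugin=deepspeed_plugin)
```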
## What is integrated?

Training:

1. 🤗 Accelerate integrates all features of DeepSpeed ZeRO. This includes all the ZeRO stages 1, 2 and 3 as well as ZeRO-Offload, ZeRO-Infinity (which can offload to disk/NVMe) and ZeRO++. Below is a short description of Data Parallelism using ZeRO - Zero Redundancy Optimizer along with a diagram from this [blog post](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/)

![ZeRO Data Parallelism](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/parallelism-zero.png)

(Source: [link](https://www.microsoft.com/en-us/research/blog/zero-deepspeed-new-system-optimizations-enable-training-models-with-over-100-billion-parameters/))

a. **Stage 1**: Shards optimizer states across data parallel workers/GPUs

b. **Stage 2**: Shards optimizer states + gradients across data parallel workers/GPUs

c. **Stage 3**: Shards optimizer states + gradients + model parameters across data parallel workers/GPUs

d. **Optimizer Offload**: Offloads the gradients + optimizer states to CPU/Disk building on top of ZeRO Stage 2

e. **Param Offload**: Offloads the model parameters to CPU/Disk building on top of ZeRO Stage 3

f. **Hierarchical Partitioning**: Enables efficient multi-node training with data-parallel training across nodes and ZeRO-3 sharding within a node, built on top of ZeRO Stage 3.

<u>Note</u>: With respect to Disk Offload, the disk should be an NVMe for decent speed but it technically works on any disk.

Inference:

1. DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. For more details see: [deepspeed-zero-inference](#deepspeed-zero-inference).

## How it works?

**Pre-Requisites**: Install DeepSpeed version >=0.6.5. Please refer to the [DeepSpeed Installation details](https://github.com/microsoft/DeepSpeed#installation) for more information.

We will first look at the easy-to-use integration via `accelerate config`, followed by the more flexible and feature-rich `deepspeed config file` integration.

### Accelerate DeepSpeed Plugin

On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed to which you should answer no. Then answer the following questions to generate a basic DeepSpeed config. This will generate a config file that will be used automatically to properly set the default options when doing

```bash
accelerate launch my_script.py --args_to_my_script
```

For instance, here is how you would run the NLP example `examples/nlp_example.py` (from the root of the repo) with DeepSpeed Plugin:

**ZeRO Stage-2 DeepSpeed Plugin Example**

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: none
  offload_param_device: none
  zero3_init_flag: true
  zero_stage: 2
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```

```bash
accelerate launch examples/nlp_example.py --mixed_precision fp16
```

**ZeRO Stage-3 with CPU Offload DeepSpeed Plugin Example**

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: cpu
  offload_param_device: cpu
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```

```bash
accelerate launch examples/nlp_example.py --mixed_precision fp16
```

Currently, `Accelerate` supports the following config through the CLI:
```bash
`zero_stage`: [0] Disabled, [1] optimizer state partitioning, [2] optimizer+gradient state partitioning and [3] optimizer+gradient+parameter partitioning
`gradient_accumulation_steps`: Number of training steps to accumulate gradients before averaging and applying them.
`gradient_clipping`: Enable gradient clipping with value.
`offload_optimizer_device`: [none] Disable optimizer offloading, [cpu] offload optimizer to CPU, [nvme] offload optimizer to NVMe SSD. Only applicable with ZeRO >= Stage-2.
`offload_optimizer_nvme_path`: Decides NVMe path to offload optimizer states. If unspecified, will default to 'none'.
`offload_param_device`: [none] Disable parameter offloading, [cpu] offload parameters to CPU, [nvme] offload parameters to NVMe SSD. Only applicable with ZeRO Stage-3.
`offload_param_nvme_path`: Decides NVMe path to offload parameters. If unspecified, will default to 'none'.
`zero3_init_flag`: Decides whether to enable `deepspeed.zero.Init` for constructing massive models. Only applicable with ZeRO Stage-3.
`zero3_save_16bit_model`: Decides whether to save 16-bit model weights when using ZeRO Stage-3.
`mixed_precision`: `no` for FP32 training, `fp16` for FP16 mixed-precision training and `bf16` for BF16 mixed-precision training.
`deepspeed_moe_layer_cls_names`: Comma-separated list of transformer Mixture-of-Experts (MoE) layer class names (case-sensitive) to wrap, e.g., `MixtralSparseMoeBlock`, `Qwen2MoeSparseMoeBlock`, `JetMoEAttention,JetMoEBlock` ...
`deepspeed_hostfile`: DeepSpeed hostfile for configuring multi-node compute resources.
`deepspeed_exclusion_filter`: DeepSpeed exclusion filter string when using multi-node setup.
`deepspeed_inclusion_filter`: DeepSpeed inclusion filter string when using multi-node setup.
`deepspeed_multinode_launcher`: DeepSpeed multi-node launcher to use. If unspecified, will default to `pdsh`.
`deepspeed_config_file`: path to the DeepSpeed config file in `json` format. See the next section for more details on this.
```

To be able to tweak more options, you will need to use a DeepSpeed config file.

### DeepSpeed Config File

On your machine(s) just run:

```bash
accelerate config
```

and answer the questions asked. It will ask whether you want to use a config file for DeepSpeed to which you answer yes and provide the path to the DeepSpeed config file.
This will generate a config file that will be used automatically to properly set the default options when doing

```bash
accelerate launch my_script.py --args_to_my_script
```

For instance, here is how you would run the NLP example `examples/by_feature/deepspeed_with_config_support.py` (from the root of the repo) with DeepSpeed Config File:

**ZeRO Stage-2 DeepSpeed Config File Example**

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage2_config.json
  zero3_init_flag: true
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```

with the contents of `zero_stage2_config.json` being:

```json
{
    "fp16": {
        "enabled": true,
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "weight_decay": "auto",
            "torch_adam": true,
            "adam_w_mode": true
        }
    },
    "scheduler": {
        "type": "WarmupDecayLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto",
            "total_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 2,
        "allgather_partitions": true,
        "allgather_bucket_size": 2e8,
        "overlap_comm": true,
        "reduce_scatter": true,
        "reduce_bucket_size": "auto",
        "contiguous_gradients": true
    },
    "gradient_accumulation_steps": 1,
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}
```

```bash
accelerate launch examples/by_feature/deepspeed_with_config_support.py \
    --config_name "gpt2-large" \
    --tokenizer_name "gpt2-large" \
    --dataset_name "wikitext" \
    --dataset_config_name "wikitext-2-raw-v1" \
    --block_size 128 \
    --output_dir "./clm/clm_deepspeed_stage2_accelerate" \
    --learning_rate 5e-4 \
    --per_device_train_batch_size 24 \
    --per_device_eval_batch_size 24 \
    --num_train_epochs 3 \
    --with_tracking \
    --report_to "wandb"
```

**ZeRO Stage-3 with CPU offload DeepSpeed Config File Example**

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  deepspeed_config_file: /home/ubuntu/accelerate/examples/configs/deepspeed_config_templates/zero_stage3_offload_config.json
  zero3_init_flag: true
distributed_type: DEEPSPEED
fsdp_config: {}
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
mixed_precision: fp16
num_machines: 1
num_processes: 2
use_cpu: false
```

with the contents of `zero_stage3_offload_config.json` being:

```json
{
    "fp16": {
        "enabled": true,
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "optimizer": {
        "type": "AdamW",
        "params": {
            "lr": "auto",
            "weight_decay": "auto"
        }
    },
    "scheduler": {
        "type": "WarmupDecayLR",
        "params": {
            "warmup_min_lr": "auto",
            "warmup_max_lr": "auto",
            "warmup_num_steps": "auto",
            "total_num_steps": "auto"
        }
    },
    "zero_optimization": {
        "stage": 3,
        "offload_optimizer": {
            "device": "cpu",
            "pin_memory": true
        },
        "offload_param": {
            "device": "cpu",
            "pin_memory": true
        },
        "overlap_comm": true,
        "contiguous_gradients": true,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "sub_group_size": 1e9,
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_16bit_weights_on_model_save": "auto"
    },
    "gradient_accumulation_steps": 1,
    "gradient_clipping": "auto",
    "steps_per_print": 2000,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "wall_clock_breakdown": false
}
```
"gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false } ``` ```bash accelerate launch examples/by_feature/deepspeed_with_config_support.py \ --config_name "gpt2-large" \ --tokenizer_name "gpt2-large" \ --dataset_name "wikitext" \ --dataset_config_name "wikitext-2-raw-v1" \ --block_size 128 \ --output_dir "./clm/clm_deepspeed_stage3_offload_accelerate" \ --learning_rate 5e-4 \ --per_device_train_batch_size 32 \ --per_device_eval_batch_size 32 \ --num_train_epochs 3 \ --with_tracking \ --report_to "wandb"\ ``` **ZeRO++ Config Example** You can use the features of ZeRO++ by using the appropriate config parameters. Note that ZeRO++ is an extension for ZeRO Stage 3. Here is how the config file can be modified, from [DeepSpeed's ZeRO++ tutorial](https://www.deepspeed.ai/tutorials/zeropp/): ```json { "zero_optimization": { "stage": 3, "reduce_bucket_size": "auto", "zero_quantized_weights": true, "zero_hpz_partition_size": 8, "zero_quantized_gradients": true, "contiguous_gradients": true, "overlap_comm": true } } ``` For hierarchical partitioning, the partition size `zero_hpz_partition_size` should ideally be set to the number of GPUs per node. (For example, the above config file assumes 8 GPUs per node) **Important code changes when using DeepSpeed Config File** 1. DeepSpeed Optimizers and Schedulers. For more information on these, see the [DeepSpeed Optimizers](https://deepspeed.readthedocs.io/en/latest/optimizers.html) and [DeepSpeed Schedulers](https://deepspeed.readthedocs.io/en/latest/schedulers.html) documentation. We will look at the changes needed in the code when using these. a. DS Optim + DS Scheduler: The case when both `optimizer` and `scheduler` keys are present in the DeepSpeed config file. In this situation, those will be used and the user has to use `accelerate.utils.DummyOptim` and `accelerate.utils.DummyScheduler` to replace the PyTorch/Custom optimizers and schedulers in their code. Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this: ```python # Creates Dummy Optimizer if `optimizer` was specified in the config file else creates Adam Optimizer optimizer_cls = ( torch.optim.AdamW if accelerator.state.deepspeed_plugin is None or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) optimizer = optimizer_cls(optimizer_grouped_parameters, lr=args.learning_rate) # Creates Dummy Scheduler if `scheduler` was specified in the config file else creates `args.lr_scheduler_type` Scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) else: lr_scheduler = DummyScheduler( optimizer, total_num_steps=args.max_train_steps, warmup_num_steps=args.num_warmup_steps ) ``` b. Custom Optim + Custom Scheduler: The case when both `optimizer` and `scheduler` keys are absent in the DeepSpeed config file. In this situation, no code changes are needed from the user and this is the case when using integration via DeepSpeed Plugin. In the above example we can see that the code remains unchanged if the `optimizer` and `scheduler` keys are absent in the DeepSpeed config file. c. 
d. DS Optim + Custom Scheduler: The case when only the `optimizer` key is present in the DeepSpeed config file. This will result in an error because you can only use DS Scheduler when using DS Optim.

2. Notice the `auto` values in the above example DeepSpeed config files. These are automatically handled by the `prepare` method based on the model, dataloaders, dummy optimizer and dummy schedulers provided to the `prepare` method. Only the `auto` fields specified in the above examples are handled by the `prepare` method and the rest have to be explicitly specified by the user.

The `auto` values are calculated as:

- `reduce_bucket_size`: `hidden_size * hidden_size`
- `stage3_prefetch_bucket_size`: `int(0.9 * hidden_size * hidden_size)`
- `stage3_param_persistence_threshold`: `10 * hidden_size`

For the `auto` feature to work for these 3 config entries, Accelerate will use `model.config.hidden_size` or `max(model.config.hidden_sizes)` as `hidden_size`. If neither of these is available, the launching will fail and you will have to set these 3 config entries manually. Remember the first 2 config entries are the communication buffers: the larger they are the more efficient the comms will be, and the larger they are the more GPU memory they will consume, so it's a tunable performance trade-off.
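
In code, that calculation amounts to the following sketch (assuming the model config exposes `hidden_size` or `hidden_sizes`):

```python
hidden_size = getattr(model.config, "hidden_size", None)
if hidden_size is None:
    hidden_size = max(model.config.hidden_sizes)

reduce_bucket_size = hidden_size * hidden_size
stage3_prefetch_bucket_size = int(0.9 * hidden_size * hidden_size)
stage3_param_persistence_threshold = 10 * hidden_size
```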
**Things to note when using DeepSpeed Config File**

Below is a sample script using `deepspeed_config_file` in different scenarios.

Code `test.py`:

```python
from accelerate import Accelerator
from accelerate.state import AcceleratorState


def main():
    accelerator = Accelerator()
    accelerator.print(f"{AcceleratorState()}")


if __name__ == "__main__":
    main()
```

**Scenario 1**: Manually tampered accelerate config file having `deepspeed_config_file` along with other entries.

1. Content of the `accelerate` config:

```yaml
command_file: null
commands: null
compute_environment: LOCAL_MACHINE
deepspeed_config:
  gradient_accumulation_steps: 1
  gradient_clipping: 1.0
  offload_optimizer_device: 'cpu'
  offload_param_device: 'cpu'
  zero3_init_flag: true
  zero3_save_16bit_model: true
  zero_stage: 3
  deepspeed_config_file: 'ds_config.json'
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
gpu_ids: null
machine_rank: 0
main_process_ip: null
main_process_port: null
main_training_function: main
megatron_lm_config: {}
num_machines: 1
num_processes: 2
rdzv_backend: static
same_network: true
tpu_name: null
tpu_zone: null
use_cpu: false
```

2. `ds_config.json`:

```json
{
    "bf16": {
        "enabled": true
    },
    "zero_optimization": {
        "stage": 3,
        "stage3_gather_16bit_weights_on_model_save": false,
        "offload_optimizer": {
            "device": "none"
        },
        "offload_param": {
            "device": "none"
        }
    },
    "gradient_clipping": 1.0,
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": 10,
    "steps_per_print": 2000000
}
```

3. Output of `accelerate launch test.py`:

```bash
ValueError: When using `deepspeed_config_file`, the following accelerate config variables will be ignored:
['gradient_accumulation_steps', 'gradient_clipping', 'zero_stage', 'offload_optimizer_device', 'offload_param_device', 'zero3_save_16bit_model', 'mixed_precision'].
Please specify them appropriately in the DeepSpeed config file.
If you are using an accelerate config file, remove other config variables mentioned in the above specified list.
The easiest method is to create a new config following the questionnaire via `accelerate config`.
It will only ask for the necessary config variables when using `deepspeed_config_file`.
```
**Scenario 2**: Use the solution from the error to create a new accelerate config and check that no ambiguity error is now thrown.

1. Run `accelerate config`:

```bash
$ accelerate config
-------------------------------------------------------------------------------------------------------------------------------
In which compute environment are you running?
This machine
-------------------------------------------------------------------------------------------------------------------------------
Which type of machine are you using?
multi-GPU
How many different machines will you use (use more than 1 for multi-node training)? [1]:
Do you wish to optimize your script with torch dynamo?[yes/NO]:
Do you want to use DeepSpeed? [yes/NO]: yes
Do you want to specify a json file to a DeepSpeed config? [yes/NO]: yes
Please enter the path to the json DeepSpeed config file: ds_config.json
Do you want to enable `deepspeed.zero.Init` when using ZeRO Stage-3 for constructing massive models? [yes/NO]: yes
How many GPU(s) should be used for distributed training? [1]:4
accelerate configuration saved at ds_config_sample.yaml
```

2. Content of the `accelerate` config:

```yaml
compute_environment: LOCAL_MACHINE
deepspeed_config:
  deepspeed_config_file: ds_config.json
  zero3_init_flag: true
distributed_type: DEEPSPEED
downcast_bf16: 'no'
dynamo_backend: 'NO'
fsdp_config: {}
machine_rank: 0
main_training_function: main
megatron_lm_config: {}
num_machines: 1
num_processes: 4
rdzv_backend: static
same_network: true
use_cpu: false
```

3. Output of `accelerate launch test.py`:

```bash
Distributed environment: DEEPSPEED  Backend: nccl
Num processes: 4
Process index: 0
Local process index: 0
Device: cuda:0
Mixed precision type: bf16
ds_config: {'bf16': {'enabled': True}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': False, 'offload_optimizer': {'device': 'none'}, 'offload_param': {'device': 'none'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 10, 'steps_per_print': inf, 'fp16': {'enabled': False}}
```

**Scenario 3**: Setting the `accelerate launch` command arguments related to DeepSpeed as `"auto"` in the DeepSpeed configuration file and checking that things work as expected.

1. New `ds_config.json` with `"auto"` for the `accelerate launch` DeepSpeed command arguments:

```json
{
    "bf16": {
        "enabled": "auto"
    },
    "zero_optimization": {
        "stage": "auto",
        "stage3_gather_16bit_weights_on_model_save": "auto",
        "offload_optimizer": {
            "device": "auto"
        },
        "offload_param": {
            "device": "auto"
        }
    },
    "gradient_clipping": "auto",
    "train_batch_size": "auto",
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": "auto",
    "steps_per_print": 2000000
}
```

2. Output of `accelerate launch --mixed_precision="fp16" --zero_stage=3 --gradient_accumulation_steps=5 --gradient_clipping=1.0 --offload_param_device="cpu" --offload_optimizer_device="nvme" --zero3_save_16bit_model="true" test.py`:

```bash
Distributed environment: DEEPSPEED  Backend: nccl
Num processes: 4
Process index: 0
Local process index: 0
Device: cuda:0
Mixed precision type: fp16
ds_config: {'bf16': {'enabled': False}, 'zero_optimization': {'stage': 3, 'stage3_gather_16bit_weights_on_model_save': True, 'offload_optimizer': {'device': 'nvme'}, 'offload_param': {'device': 'cpu'}}, 'gradient_clipping': 1.0, 'train_batch_size': 'auto', 'train_micro_batch_size_per_gpu': 'auto', 'gradient_accumulation_steps': 5, 'steps_per_print': inf, 'fp16': {'enabled': True, 'auto_cast': True}}
```

**Note**:

1. Remaining `"auto"` values are handled in the `accelerator.prepare()` call as explained in point 2 of `Important code changes when using DeepSpeed Config File`.
2. Only when `gradient_accumulation_steps` is `auto` will the value passed while creating the `Accelerator` object via `Accelerator(gradient_accumulation_steps=k)` be used. When using the DeepSpeed Plugin, the value from it will be used and it will overwrite the value passed while creating the Accelerator object.

## Saving and loading

1. Saving and loading of models is unchanged for ZeRO Stage-1 and Stage-2.

2. Under ZeRO Stage-3, `state_dict` contains just the placeholders since the model weights are partitioned across multiple GPUs. ZeRO Stage-3 has 2 options:

a. Saving the entire 16bit model weights to directly load later on using `model.load_state_dict(torch.load(pytorch_model.bin))`. For this, either set `zero_optimization.stage3_gather_16bit_weights_on_model_save` to True in the DeepSpeed Config file or set `zero3_save_16bit_model` to True in the DeepSpeed Plugin. **Note that this option requires consolidation of the weights on one GPU, which can be slow and memory demanding, so only use this feature when needed.** Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:

```python
unwrapped_model = accelerator.unwrap_model(model)

# New Code #
# Saves the whole/unpartitioned fp16 model when in ZeRO Stage-3 to the output directory if
# `stage3_gather_16bit_weights_on_model_save` is True in DeepSpeed Config file or
# `zero3_save_16bit_model` is True in DeepSpeed Plugin.
# For Zero Stages 1 and 2, models are saved as usual in the output directory.
# The model name saved is `pytorch_model.bin`
unwrapped_model.save_pretrained(
    args.output_dir,
    is_main_process=accelerator.is_main_process,
    save_function=accelerator.save,
    state_dict=accelerator.get_state_dict(model),
)
```

b. To get 32bit weights, first save the model using `model.save_checkpoint()`. Below is the snippet from `examples/by_feature/deepspeed_with_config_support.py` showing this:

```python
success = model.save_checkpoint(PATH, ckpt_id, checkpoint_state_dict)
status_msg = f"checkpointing: PATH={PATH}, ckpt_id={ckpt_id}"
if success:
    logging.info(f"Success {status_msg}")
else:
    logging.warning(f"Failure {status_msg}")
```

This will create ZeRO model and optimizer partitions along with the `zero_to_fp32.py` script in the checkpoint directory. You can use this script to do offline consolidation. It requires no configuration files or GPUs. Here is an example of its usage:
```bash
$ cd /path/to/checkpoint_dir
$ ./zero_to_fp32.py . pytorch_model.bin
Processing zero checkpoint at global_step1
Detected checkpoint of type zero stage 3, world_size: 2
Saving fp32 state dict to pytorch_model.bin (total_numel=60506624)
```

To get the 32bit model for saving/inference, you can perform:

```python
from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint

unwrapped_model = accelerator.unwrap_model(model)
fp32_model = load_state_dict_from_zero_checkpoint(unwrapped_model, checkpoint_dir)
```

If you are only interested in the `state_dict`, you can do the following:

```python
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)
```

Note that all these functions require ~2x memory (general RAM) of the size of the final checkpoint.

## ZeRO Inference

DeepSpeed ZeRO Inference supports ZeRO stage 3 with ZeRO-Infinity. It uses the same ZeRO protocol as training, but it doesn't use an optimizer and a lr scheduler and only stage 3 is relevant. With the Accelerate integration, you just need to prepare the model and dataloader as shown below:

```python
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
```

## Few caveats to be aware of

1. The current integration doesn't support Pipeline Parallelism of DeepSpeed.
2. The current integration doesn't support `mpu`, limiting the tensor parallelism which is supported in Megatron-LM.
3. The current integration doesn't support multiple models.

## DeepSpeed Resources

The documentation for the internals related to DeepSpeed can be found [here](../package_reference/deepspeed).

- [Project's github](https://github.com/microsoft/deepspeed)
- [Usage docs](https://www.deepspeed.ai/getting-started/)
- [API docs](https://deepspeed.readthedocs.io/en/latest/index.html)
- [Blog posts](https://www.microsoft.com/en-us/research/search/?q=deepspeed)

Papers:

- [ZeRO: Memory Optimizations Toward Training Trillion Parameter Models](https://arxiv.org/abs/1910.02054)
- [ZeRO-Offload: Democratizing Billion-Scale Model Training](https://arxiv.org/abs/2101.06840)
- [ZeRO-Infinity: Breaking the GPU Memory Wall for Extreme Scale Deep Learning](https://arxiv.org/abs/2104.07857)
- [ZeRO++: Extremely Efficient Collective Communication for Giant Model Training](https://arxiv.org/abs/2306.10209)

Finally, please remember that 🤗 `Accelerate` only integrates DeepSpeed, therefore if you have any problems or questions with regards to DeepSpeed usage, please file an issue with [DeepSpeed GitHub](https://github.com/microsoft/DeepSpeed/issues).

<Tip>

For those interested in the similarities and differences between FSDP and DeepSpeed, please check out the [concept guide here](../concept_guides/fsdp_and_deepspeed)!

</Tip>
accelerate/docs/source/usage_guides/deepspeed.md
{ "file_path": "accelerate/docs/source/usage_guides/deepspeed.md", "repo_id": "accelerate", "token_count": 10169 }
1
<!--- Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# In this folder we showcase various full examples using 🤗 Accelerate

## Simple NLP example

The [nlp_example.py](./nlp_example.py) script is a simple example to train a Bert model on a classification task ([GLUE's MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398)).

Prior to running it you should install 🤗 Datasets and 🤗 Transformers:

```bash
pip install datasets evaluate transformers
```

The same script can be run in any of the following configurations:
- single CPU or single GPU
- multi CPUs
- multi GPUs (using PyTorch distributed mode)
- (multi) TPUs
- fp16 (mixed-precision) or fp32 (normal precision)

To run it in each of these various modes, use the following commands:
- single CPU:
  * from a server without GPU
    ```bash
    python ./nlp_example.py
    ```
  * from any server by passing `cpu=True` to the `Accelerator`.
    ```bash
    python ./nlp_example.py --cpu
    ```
  * from any server with Accelerate launcher
    ```bash
    accelerate launch --cpu ./nlp_example.py
    ```
- single GPU:
  ```bash
  python ./nlp_example.py  # from a server with a GPU
  ```
- with fp16 (mixed-precision)
  * from any server by passing `mixed_precision=fp16` to the `Accelerator`.
    ```bash
    python ./nlp_example.py --mixed_precision fp16
    ```
  * from any server with Accelerate launcher
    ```bash
    accelerate launch --mixed_precision fp16 ./nlp_example.py
    ```
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
  * With Accelerate config and launcher, execute the following from node 0:
    ```bash
    accelerate config  # Select to have accelerate launch mpirun
    accelerate launch ./nlp_example.py  # This will run the script on each server
    ```
  * With Intel MPI:
    ```bash
    export CCL_WORKER_COUNT=1
    export MASTER_ADDR=xxx.xxx.xxx.xxx  # node0 ip
    mpirun -f hostfile -n 16 -ppn 4 python ./nlp_example.py
    ```
- multi GPUs (using PyTorch distributed mode)
  * With Accelerate config and launcher
    ```bash
    accelerate config  # This will create a config file on your server
    accelerate launch ./nlp_example.py  # This will run the script on your server
    ```
  * With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`)
    ```bash
    torchrun --nproc_per_node 2 ./nlp_example.py
    ```
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
  * With Accelerate config and launcher, on each machine:
    ```bash
    accelerate config  # This will create a config file on each server
    accelerate launch ./nlp_example.py  # This will run the script on each server
    ```
  * With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`).
    Run this command on each node:
    ```bash
    # `--rdzv_id` is a unique job id
    torchrun \
        --nproc_per_node 2 \
        --nnodes 2 \
        --rdzv_id 2299 \
        --rdzv_backend c10d \
        --rdzv_endpoint master_node_ip_address:29500 \
        ./nlp_example.py
    ```
- (multi) TPUs
  * With Accelerate config and launcher
    ```bash
    accelerate config  # This will create a config file on your TPU server
    accelerate launch ./nlp_example.py  # This will run the script on each server
    ```
  * In PyTorch: Add an `xmp.spawn` line in your script as you usually do (see the sketch below).
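
The `xmp.spawn` pattern mentioned in the TPU bullet above looks roughly like this — a sketch assuming `torch_xla` is installed and `main()` is your training entry point:

```python
import torch_xla.distributed.xla_multiprocessing as xmp


def _mp_fn(index):
    # each TPU core runs this function; `index` is the process index
    main()


if __name__ == "__main__":
    xmp.spawn(_mp_fn, args=())
```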
## Simple vision example

The [cv_example.py](./cv_example.py) script is a simple example to fine-tune a ResNet-50 on a classification task ([Oxford-IIIT Pet Dataset](https://www.robots.ox.ac.uk/~vgg/data/pets/)).

The same script can be run in any of the following configurations:
- single CPU or single GPU
- multi CPUs
- multi GPUs (using PyTorch distributed mode)
- (multi) TPUs
- fp16 (mixed-precision) or fp32 (normal precision)

Prior to running it you should install timm and torchvision:

```bash
pip install timm torchvision
```

and you should download the data with the following commands:

```bash
wget https://www.robots.ox.ac.uk/~vgg/data/pets/data/images.tar.gz
tar -xzf images.tar.gz
```

To run it in each of these various modes, use the following commands:
- single CPU:
  * from a server without GPU
    ```bash
    python ./cv_example.py --data_dir path_to_data
    ```
  * from any server by passing `cpu=True` to the `Accelerator`.
    ```bash
    python ./cv_example.py --data_dir path_to_data --cpu
    ```
  * from any server with Accelerate launcher
    ```bash
    accelerate launch --cpu ./cv_example.py --data_dir path_to_data
    ```
- single GPU:
  ```bash
  python ./cv_example.py  # from a server with a GPU
  ```
- with fp16 (mixed-precision)
  * from any server by passing `mixed_precision=fp16` to the `Accelerator`.
    ```bash
    python ./cv_example.py --data_dir path_to_data --mixed_precision fp16
    ```
  * from any server with Accelerate launcher
    ```bash
    accelerate launch --mixed_precision fp16 ./cv_example.py --data_dir path_to_data
    ```
- multi CPUs (requires Open MPI, Intel MPI, or MVAPICH)
  * With Accelerate config and launcher, run the following from node 0:
    ```bash
    accelerate config --config_file config.yaml  # Select to have accelerate launch mpirun
    accelerate launch ./cv_example.py --data_dir path_to_data  # This will run the script on each server
    ```
  * With Intel MPI, execute mpirun from node 0:
    ```bash
    export CCL_WORKER_COUNT=1
    export MASTER_ADDR=xxx.xxx.xxx.xxx  # node0 ip
    mpirun -f hostfile -n 16 -ppn 4 python ./cv_example.py --data_dir path_to_data
    ```
- multi GPUs (using PyTorch distributed mode)
  * With Accelerate config and launcher
    ```bash
    accelerate config --config_file config.yaml  # This will create a config file on your server to `config.yaml`
    accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data  # This will run the script on your server
    ```
  * With traditional PyTorch launcher (`python -m torch.distributed.run` can be used instead of `torchrun`)
    ```bash
    torchrun --nproc_per_node 2 ./cv_example.py --data_dir path_to_data
    ```
- multi GPUs, multi node (several machines, using PyTorch distributed mode)
  * With Accelerate config and launcher, on each machine:
    ```bash
    accelerate config --config_file config.yaml  # This will create a config file on each server to `config.yaml`
    accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data  # This will run the script on each server
    ```
  * With PyTorch launcher only (`python -m torch.distributed.run` can be used instead of `torchrun`). Run this command on each node:
    ```bash
    # `--rdzv_id` is a unique job id
    torchrun \
        --nproc_per_node 2 \
        --nnodes 2 \
        --rdzv_id 2299 \
        --rdzv_backend c10d \
        --rdzv_endpoint master_node_ip_address:29500 \
        ./cv_example.py --data_dir path_to_data
    ```
- (multi) TPUs
  * With Accelerate config and launcher
    ```bash
    accelerate config --config_file config.yaml  # This will create a config file on your server to `config.yaml`
    accelerate launch --config_file config.yaml ./cv_example.py --data_dir path_to_data  # This will run the script on each server
    ```
  * In PyTorch: Add an `xmp.spawn` line in your script as you usually do.

### Simple vision example (GANs)

- [huggan project](https://github.com/huggingface/community-events/tree/main/huggan)

### Using AWS SageMaker integration

- [Examples showcasing AWS SageMaker integration of 🤗 Accelerate.](https://github.com/pacman100/accelerate-aws-sagemaker)

## Configuration zoo

In [/config_yaml_templates](./config_yaml_templates/) we have a variety of *minimal* `config.yaml` templates and examples to help you learn how to create your own configuration files depending on the scenario.

## SLURM Scripts

In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) and [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we present two scripts for running the examples on a machine with [SLURM](https://slurm.schedmd.com/documentation.html) workload manager.

In [/slurm/submit_multigpu.sh](./slurm/submit_multigpu.sh) the only parameter in the launcher that needs to be modified is `--num_processes`, which determines the number of GPUs we will use. In this case, using the environment variable `$SLURM_GPUS`, we indicate that we want to utilize all the GPUs available on the node we have requested.
In [/slurm/submit_multinode.sh](./slurm/submit_multinode.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many GPUs we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip`, which will be the address of the master node, and the `--main_process_port`.

In [/slurm/submit_multicpu.sh](./slurm/submit_multicpu.sh) we must specify the number of nodes that will be part of the training (`--num_machines`), how many CPU processes we will use in total (`--num_processes`), the [`backend`](https://pytorch.org/docs/stable/elastic/run.html#note-on-rendezvous-backend), `--main_process_ip`, which will be the address of the master node, and the `--main_process_port`. `mpirun_hostfile` specifies to run the job using MPIRun.

In both scripts, we run `activateEnvironment.sh` at the beginning. This script should contain the necessary instructions to initialize the environment for execution. Below, we show an example that loads the necessary libraries ([Environment modules](https://github.com/cea-hpc/modules)), activates the Python environment, and sets up various environment variables, most of them to run the scripts in offline mode in case we don't have an internet connection from the cluster.

```bash
# activateEnvironment.sh
module purge
module load anaconda3/2020.02 cuda/10.2 cudnn/8.0.5 nccl/2.9.9 arrow/7.0.0 openmpi
source activate /home/nct01/nct01328/pytorch_antoni_local

export HF_HOME=/gpfs/projects/nct01/nct01328/
export HF_LOCAL_HOME=/gpfs/projects/nct01/nct01328/HF_LOCAL
export HF_DATASETS_OFFLINE=1
export TRANSFORMERS_OFFLINE=1
export PYTHONPATH=/home/nct01/nct01328/transformers-in-supercomputers:$PYTHONPATH
export GPUS_PER_NODE=4
```

## Simple Multi-GPU Hardware Launcher (using an external platform)

[multigpu_remote_launcher.py](./multigpu_remote_launcher.py) is a minimal script that demonstrates launching accelerate on multiple remote GPUs, with automatic hardware environment and dependency setup for reproducibility. You can easily customize the training function used, training arguments, hyperparameters, and type of compute hardware, and then run the script to automatically launch multi GPU training on remote hardware.

This script uses [Runhouse](https://github.com/run-house/runhouse) to launch on self-hosted hardware (e.g. in your own cloud account or on-premise cluster) but there are other options for running remotely as well. Runhouse can be installed with `pip install runhouse`, and you can refer to [hardware setup](https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup) for hardware setup instructions, or this [Colab tutorial](https://colab.research.google.com/drive/1qVwYyLTCPYPSdz9ZX7BZl9Qm0A3j7RJe) for a more in-depth walkthrough.

## Finer Examples

While the first two scripts are extremely barebones when it comes to what you can do with Accelerate, more advanced features are documented in two other locations.

### `by_feature` examples

These scripts are *individual* examples highlighting one particular feature or use-case within Accelerate. They all stem from the [nlp_example.py](./nlp_example.py) script, and any changes or modifications are denoted with a `# New Code #` comment.

Read the README.md file located in the `by_feature` folder for more information.

### `complete_*` examples

These two scripts contain *every* single feature currently available in Accelerate in one place, as one giant script.
New arguments that can be passed include:

- `checkpointing_steps`, whether the various states should be saved at the end of every `n` steps, or `"epoch"` for each epoch. States are then saved to folders named `step_{n}` or `epoch_{n}`.
- `resume_from_checkpoint`, should be used if you want to resume training from a previous call to the script that was passed `checkpointing_steps`, pointing to one of the saved `step_{n}` or `epoch_{n}` folders.
- `with_tracking`, should be used if you want to log the training run using all available experiment trackers in your environment. Currently supported trackers include TensorBoard, Weights and Biases, and CometML.
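Putting those flags together, a run could look like the following sketch (the checkpoint folder name assumes a previous run saved states with `--checkpointing_steps 100`):

```bash
# Save states every 100 steps, resume from an earlier checkpoint, and log to available trackers
accelerate launch ./complete_nlp_example.py \
    --checkpointing_steps 100 \
    --resume_from_checkpoint step_100 \
    --with_tracking
```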
accelerate/examples/README.md
#!/bin/bash
#SBATCH --job-name=multinode
#SBATCH -D .
#SBATCH --output=O-%x.%j
#SBATCH --error=E-%x.%j
#SBATCH --nodes=4                 # number of nodes
#SBATCH --ntasks-per-node=1       # number of MP tasks
#SBATCH --gres=gpu:4              # number of GPUs per node
#SBATCH --cpus-per-task=160       # number of cores per tasks
#SBATCH --time=01:59:00           # maximum execution time (HH:MM:SS)

#######################
### Set environment ###
#######################
source activateEnvironment.sh
export GPUS_PER_NODE=4
#######################

######################
#### Set network #####
######################
head_node_ip=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
######################

export LAUNCHER="accelerate launch \
    --num_processes $((SLURM_NNODES * GPUS_PER_NODE)) \
    --num_machines $SLURM_NNODES \
    --rdzv_backend c10d \
    --main_process_ip $head_node_ip \
    --main_process_port 29500 \
    "
export ACCELERATE_DIR="${ACCELERATE_DIR:-/accelerate}"
export SCRIPT="${ACCELERATE_DIR}/examples/complete_nlp_example.py"
export SCRIPT_ARGS=" \
    --mixed_precision fp16 \
    --output_dir ${ACCELERATE_DIR}/examples/output \
    "

# This step is necessary because accelerate launch does not handle multiline arguments properly
export CMD="$LAUNCHER $SCRIPT $SCRIPT_ARGS"
srun $CMD
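As a usage note, a script like this would typically be submitted from the login node along these lines (paths are illustrative; `ACCELERATE_DIR` is the override the script itself reads, defaulting to `/accelerate`):

```bash
# sbatch exports the caller's environment by default, so the override reaches the job
ACCELERATE_DIR=$HOME/accelerate sbatch submit_multinode.sh
```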
accelerate/examples/slurm/submit_multinode.sh
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from setuptools import find_packages, setup extras = {} extras["quality"] = [ "black ~= 23.1", # hf-doc-builder has a hidden dependency on `black` "hf-doc-builder >= 0.3.0", "ruff ~= 0.2.1", ] extras["docs"] = [] extras["test_prod"] = ["pytest>=7.2.0,<=8.0.0", "pytest-xdist", "pytest-subtests", "parameterized"] extras["test_dev"] = [ "datasets", "diffusers", "evaluate", "torchdata>=0.8.0", "torchpippy>=0.2.0", "transformers", "scipy", "scikit-learn", "tqdm", "bitsandbytes", "timm", ] extras["testing"] = extras["test_prod"] + extras["test_dev"] extras["deepspeed"] = ["deepspeed"] extras["rich"] = ["rich"] extras["test_trackers"] = ["wandb", "comet-ml", "tensorboard", "dvclive"] extras["dev"] = extras["quality"] + extras["testing"] + extras["rich"] extras["sagemaker"] = [ "sagemaker", # boto3 is a required package in sagemaker ] setup( name="accelerate", version="0.34.0.dev0", description="Accelerate", long_description=open("README.md", encoding="utf-8").read(), long_description_content_type="text/markdown", keywords="deep learning", license="Apache", author="The HuggingFace team", author_email="zach.mueller@huggingface.co", url="https://github.com/huggingface/accelerate", package_dir={"": "src"}, packages=find_packages("src"), entry_points={ "console_scripts": [ "accelerate=accelerate.commands.accelerate_cli:main", "accelerate-config=accelerate.commands.config:main", "accelerate-estimate-memory=accelerate.commands.estimate:main", "accelerate-launch=accelerate.commands.launch:main", "accelerate-merge-weights=accelerate.commands.merge:main", ] }, python_requires=">=3.8.0", install_requires=[ "numpy>=1.17,<3.0.0", "packaging>=20.0", "psutil", "pyyaml", "torch>=1.10.0", "huggingface_hub>=0.21.0", "safetensors>=0.4.3", ], extras_require=extras, classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Education", "Intended Audience :: Science/Research", "License :: OSI Approved :: Apache Software License", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], ) # Release checklist # 1. Checkout the release branch (for a patch the current release branch, for a new minor version, create one): # git checkout -b vXX.xx-release # The -b is only necessary for creation (so remove it when doing a patch) # 2. Change the version in __init__.py and setup.py to the proper value. # 3. Commit these changes with the message: "Release: v<VERSION>" # 4. Add a tag in git to mark the release: # git tag v<VERSION> -m 'Adds tag v<VERSION> for pypi' # Push the tag and release commit to git: git push --tags origin vXX.xx-release # 5. Run the following commands in the top-level directory: # rm -rf dist # rm -rf build # python setup.py bdist_wheel # python setup.py sdist # 6. 
Upload the package to the pypi test server first: # twine upload dist/* -r testpypi # 7. Check that you can install it in a virtualenv by running: # pip install accelerate # pip uninstall accelerate # pip install -i https://testpypi.python.org/pypi accelerate # accelerate env # accelerate test # 8. Upload the final version to actual pypi: # twine upload dist/* -r pypi # 9. Add release notes to the tag in github once everything is looking hunky-dory. # 10. Go back to the main branch and update the version in __init__.py, setup.py to the new version ".dev" and push to # main.
accelerate/setup.py
#!/usr/bin/env python # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from huggingface_hub import model_info from huggingface_hub.utils import GatedRepoError, RepositoryNotFoundError from accelerate import init_empty_weights from accelerate.commands.utils import CustomArgumentParser from accelerate.utils import ( calculate_maximum_sizes, convert_bytes, is_timm_available, is_transformers_available, ) if is_transformers_available(): import transformers from transformers import AutoConfig, AutoModel if is_timm_available(): import timm def verify_on_hub(repo: str, token: str = None): "Verifies that the model is on the hub and returns the model info." try: return model_info(repo, token=token) except (OSError, GatedRepoError): return "gated" except RepositoryNotFoundError: return "repo" def check_has_model(error): """ Checks what library spawned `error` when a model is not found """ if is_timm_available() and isinstance(error, RuntimeError) and "Unknown model" in error.args[0]: return "timm" elif ( is_transformers_available() and isinstance(error, OSError) and "does not appear to have a file named" in error.args[0] ): return "transformers" else: return "unknown" def create_empty_model(model_name: str, library_name: str, trust_remote_code: bool = False, access_token: str = None): """ Creates an empty model from its parent library on the `Hub` to calculate the overall memory consumption. Args: model_name (`str`): The model name on the Hub library_name (`str`): The library the model has an integration with, such as `transformers`. Will be used if `model_name` has no metadata on the Hub to determine the library. trust_remote_code (`bool`, `optional`, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. access_token (`str`, `optional`, defaults to `None`): The access token to use to access private or gated models on the Hub. (for use on the Gradio app) Returns: `torch.nn.Module`: The torch model that has been initialized on the `meta` device. """ model_info = verify_on_hub(model_name, access_token) # Simplified errors if model_info == "gated": raise GatedRepoError( f"Repo for model `{model_name}` is gated. You must be authenticated to access it. Please run `huggingface-cli login`." ) elif model_info == "repo": raise RepositoryNotFoundError( f"Repo for model `{model_name}` does not exist on the Hub. If you are trying to access a private repo," " make sure you are authenticated via `huggingface-cli login` and have access." 
) if library_name is None: library_name = getattr(model_info, "library_name", False) if not library_name: raise ValueError( f"Model `{model_name}` does not have any library metadata on the Hub, please manually pass in a `--library_name` to use (such as `transformers`)" ) if library_name == "transformers": if not is_transformers_available(): raise ImportError( f"To check `{model_name}`, `transformers` must be installed. Please install it via `pip install transformers`" ) print(f"Loading pretrained config for `{model_name}` from `transformers`...") if model_info.config is None: raise RuntimeError(f"Tried to load `{model_name}` with `transformers` but it does not have any metadata.") auto_map = model_info.config.get("auto_map", False) config = AutoConfig.from_pretrained(model_name, trust_remote_code=trust_remote_code, token=access_token) with init_empty_weights(): # remote code could specify a specific `AutoModel` class in the `auto_map` constructor = AutoModel if isinstance(auto_map, dict): value = None for key in auto_map.keys(): if key.startswith("AutoModelFor"): value = key break if value is not None: constructor = getattr(transformers, value) model = constructor.from_config(config, trust_remote_code=trust_remote_code) elif library_name == "timm": if not is_timm_available(): raise ImportError( f"To check `{model_name}`, `timm` must be installed. Please install it via `pip install timm`" ) print(f"Loading pretrained config for `{model_name}` from `timm`...") with init_empty_weights(): model = timm.create_model(model_name, pretrained=False) else: raise ValueError( f"Library `{library_name}` is not supported yet, please open an issue on GitHub for us to add support." ) return model def create_ascii_table(headers: list, rows: list, title: str): "Creates a pretty table from a list of rows, minimal version of `tabulate`." 
sep_char, in_between = "│", "─"
    column_widths = []
    for i in range(len(headers)):
        column_values = [row[i] for row in rows] + [headers[i]]
        max_column_width = max(len(value) for value in column_values)
        column_widths.append(max_column_width)
    formats = [f"%{column_widths[i]}s" for i in range(len(rows[0]))]

    pattern = f"{sep_char}{sep_char.join(formats)}{sep_char}"
    diff = 0

    def make_row(left_char, middle_char, right_char):
        return f"{left_char}{middle_char.join([in_between * n for n in column_widths])}{in_between * diff}{right_char}"

    separator = make_row("├", "┼", "┤")
    if len(title) > sum(column_widths):
        diff = abs(len(title) - len(separator))
        column_widths[-1] += diff

    # Update with diff
    separator = make_row("├", "┼", "┤")
    initial_rows = [
        make_row("┌", in_between, "┐"),
        f"{sep_char}{title.center(len(separator) - 2)}{sep_char}",
        make_row("├", "┬", "┤"),
    ]
    table = "\n".join(initial_rows) + "\n"
    column_widths[-1] += diff
    centered_line = [text.center(column_widths[i]) for i, text in enumerate(headers)]
    table += f"{pattern % tuple(centered_line)}\n{separator}\n"
    for i, line in enumerate(rows):
        centered_line = [t.center(column_widths[i]) for i, t in enumerate(line)]
        table += f"{pattern % tuple(centered_line)}\n"
    table += f'└{"┴".join([in_between * n for n in column_widths])}┘'
    return table


def estimate_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("estimate-memory")
    else:
        parser = CustomArgumentParser(description="Model size estimator for fitting a model onto CUDA memory.")

    parser.add_argument("model_name", type=str, help="The model name on the Hugging Face Hub.")
    parser.add_argument(
        "--library_name",
        type=str,
        help="The library the model has an integration with, such as `transformers`, needed only if this information is not stored on the Hub.",
        choices=["timm", "transformers"],
    )
    parser.add_argument(
        "--dtypes",
        type=str,
        nargs="+",
        default=["float32", "float16", "int8", "int4"],
        help="The dtypes to use for the model, must be one (or many) of `float32`, `float16`, `int8`, and `int4`",
        choices=["float32", "float16", "int8", "int4"],
    )
    parser.add_argument(
        "--trust_remote_code",
        action="store_true",
        help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. This flag
        should only be used for repositories you trust and in which you have read the code, as it will execute code
        present on the Hub on your local machine.""",
        default=False,
    )

    if subparsers is not None:
        parser.set_defaults(func=estimate_command)
    return parser


def estimate_training_usage(bytes: int, mixed_precision: str, msamp_config: str = None) -> dict:
    """
    Given an amount of `bytes` and `mixed_precision`, calculates how much training memory is needed for a batch size of 1.

    Args:
        bytes (`int`):
            The size of the model being trained.
        mixed_precision (`str`):
            The mixed precision that would be run.
        msamp_config (`str`):
            The msamp config to estimate the training memory for if `mixed_precision` is set to `"fp8"`.
""" memory_sizes = {"model": -1, "optimizer": -1, "gradients": -1, "step": -1} fp32_size = bytes fp16_size = bytes // 2 if mixed_precision == "float32": memory_sizes["model"] = fp32_size memory_sizes["gradients"] = fp32_size memory_sizes["optimizer"] = fp32_size * 2 memory_sizes["step"] = fp32_size * 4 elif mixed_precision in ("float16", "bfloat16") or (mixed_precision == "fp8" and msamp_config is None): # With native `TransformersEngine`, there is no memory savings with FP8 # With mixed precision training, the model has weights stored # in FP16 and FP32 memory_sizes["model"] = fp32_size # 1.5 from weight gradient + computation (GEMM) memory_sizes["gradients"] = fp32_size + fp16_size # 2x from optimizer states memory_sizes["optimizer"] = fp32_size * 2 # Optimizer states memory_sizes["step"] = memory_sizes["optimizer"] return memory_sizes def gather_data(args): "Creates an empty model and gathers the data for the sizes" try: model = create_empty_model( args.model_name, library_name=args.library_name, trust_remote_code=args.trust_remote_code ) except (RuntimeError, OSError) as e: library = check_has_model(e) if library != "unknown": raise RuntimeError( f"Tried to load `{args.model_name}` with `{library}` but a possible model to load was not found inside the repo." ) raise e total_size, largest_layer = calculate_maximum_sizes(model) data = [] for dtype in args.dtypes: dtype_total_size = total_size dtype_largest_layer = largest_layer[0] dtype_training_size = estimate_training_usage(dtype_total_size, dtype) if dtype == "float16": dtype_total_size /= 2 dtype_largest_layer /= 2 elif dtype == "int8": dtype_total_size /= 4 dtype_largest_layer /= 4 elif dtype == "int4": dtype_total_size /= 8 dtype_largest_layer /= 8 data.append([dtype, dtype_largest_layer, dtype_total_size, dtype_training_size]) return data def estimate_command(args): data = gather_data(args) for row in data: for i, item in enumerate(row): if isinstance(item, (int, float)): row[i] = convert_bytes(item) elif isinstance(item, dict): training_usage = max(item.values()) row[i] = convert_bytes(training_usage) if training_usage != -1 else "N/A" headers = ["dtype", "Largest Layer", "Total Size", "Training using Adam"] title = f"Memory Usage for loading `{args.model_name}`" table = create_ascii_table(headers, data, title) print(table) def main(): parser = estimate_command_parser() args = parser.parse_args() estimate_command(args) if __name__ == "__main__": main()
accelerate/src/accelerate/commands/estimate.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import Accelerator, DistributedType


class LocalSGD:
    """
    A helper class to support local SGD on top of Accelerator. It simply runs a given number of updates independently
    on each device, and averages model weights every `local_sgd_steps` steps. It should be used only in the multi-GPU
    (or multi-CPU) setup without extensions such as DeepSpeed. In particular, this is a simple implementation that
    cannot support scenarios such as model parallelism.

    Although we are not aware of the true origins of this simple approach, the idea of local SGD is quite old and goes
    back to at least:

    Zhang, J., De Sa, C., Mitliagkas, I., & Ré, C. (2016). [Parallel SGD: When does averaging help?. arXiv preprint
    arXiv:1606.07365.](https://arxiv.org/abs/1606.07365)

    We credit the term Local SGD to the following paper (but there might be earlier references we are not aware of).

    Stich, Sebastian Urban. ["Local SGD Converges Fast and Communicates Little." ICLR 2019-International Conference on
    Learning Representations. No. CONF. 2019.](https://arxiv.org/abs/1805.09767)
    """

    def __enter__(self):
        if self.enabled:
            self.model_sync_obj = self.model.no_sync()
            self.model_sync_obj.__enter__()

        return self

    def __exit__(self, type, value, tb):
        if self.enabled:
            # Average all models on exit
            self._sync_and_avg_model_params()
            self.model_sync_obj.__exit__(type, value, tb)

    def __init__(self, accelerator: Accelerator, model: torch.nn.Module, local_sgd_steps: int, enabled: bool = True):
        """
        Constructor.

        Args:
            model (`torch.nn.Module`):
                The model whose parameters we need to average.
            accelerator (`Accelerator`):
                Accelerator object.
            local_sgd_steps (`int`):
                A number of local SGD steps (before model parameters are synchronized).
            enabled (`bool`):
                Local SGD is disabled if this parameter is set to `False`.
        """
        if accelerator.distributed_type not in [
            DistributedType.NO,
            DistributedType.MULTI_CPU,
            DistributedType.MULTI_GPU,
            DistributedType.MULTI_MLU,
            DistributedType.MULTI_MUSA,
            DistributedType.MULTI_NPU,
        ]:
            raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
        self.enabled = enabled and accelerator.distributed_type != DistributedType.NO
        self.num_steps = 0
        if self.enabled:
            self.accelerator = accelerator
            self.model = model
            self.local_sgd_steps = local_sgd_steps

    def step(self):
        """
        This function makes a "step" and synchronizes model parameters if necessary.
        """
        self.num_steps += 1
        if not self.enabled:
            return

        if self.num_steps % self.local_sgd_steps == 0:
            self._sync_and_avg_model_params()

    def _sync_and_avg_model_params(self):
        """
        Synchronize + Average model parameters across all GPUs
        """

        self.accelerator.wait_for_everyone()
        with self.accelerator.autocast():
            for param in self.model.parameters():
                param.data = self.accelerator.reduce(param.data, reduction="mean")
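To make the intended usage concrete, here is a minimal training-loop sketch (the tiny model, optimizer, and random dataset are placeholders, and `local_sgd_steps=4` is an arbitrary choice):

```python
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.local_sgd import LocalSGD

accelerator = Accelerator()
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
dataset = TensorDataset(torch.randn(64, 10), torch.randint(0, 2, (64,)))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

# Gradients stay local between synchronizations; parameters are averaged every `local_sgd_steps` steps
with LocalSGD(accelerator=accelerator, model=model, local_sgd_steps=4, enabled=True) as local_sgd:
    for inputs, targets in dataloader:
        outputs = model(inputs)
        loss = torch.nn.functional.cross_entropy(outputs, targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()
        local_sgd.step()  # counts steps and triggers parameter averaging when due
```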
accelerate/src/accelerate/local_sgd.py
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import os
import platform
import subprocess
import sys
from dataclasses import dataclass, field
from functools import lru_cache
from shutil import which
from typing import List, Optional

import torch
from packaging.version import parse


logger = logging.getLogger(__name__)


def convert_dict_to_env_variables(current_env: dict):
    """
    Verifies that all keys and values in `current_env` do not contain illegal characters or empty values, and returns
    a list of strings as the result.

    Example:
    ```python
    >>> from accelerate.utils.environment import convert_dict_to_env_variables
    >>> env = {"ACCELERATE_DEBUG_MODE": "1", "BAD_ENV_NAME": "<mything", "OTHER_ENV": "2"}
    >>> valid_env_items = convert_dict_to_env_variables(env)
    >>> print(valid_env_items)
    ["ACCELERATE_DEBUG_MODE=1\n", "OTHER_ENV=2\n"]
    ```
    """
    forbidden_chars = [";", "\n", "<", ">", " "]
    valid_env_items = []
    for key, value in current_env.items():
        if all(char not in (key + value) for char in forbidden_chars) and len(key) >= 1 and len(value) >= 1:
            valid_env_items.append(f"{key}={value}\n")
        else:
            logger.warning(f"WARNING: Skipping {key}={value} as it contains forbidden characters or missing values.")
    return valid_env_items


def str_to_bool(value) -> int:
    """
    Converts a string representation of truth to `True` (1) or `False` (0).

    True values are `y`, `yes`, `t`, `true`, `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`;
    """
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    elif value in ("n", "no", "f", "false", "off", "0"):
        return 0
    else:
        raise ValueError(f"invalid truth value {value}")


def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    """Returns truthy value for `key` from the env if available else the default."""
    value = os.environ.get(key, str(default))
    return str_to_bool(value) == 1  # As its name indicates `str_to_bool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value


def are_libraries_initialized(*library_names: str) -> List[str]:
    """
    Checks if any of `library_names` are imported in the environment. Will return any names that are.
    """
    return [lib_name for lib_name in library_names if lib_name in sys.modules.keys()]


def _nvidia_smi():
    """
    Returns the right nvidia-smi command based on the system.
""" if platform.system() == "Windows": # If platform is Windows and nvidia-smi can't be found in path # try from systemd drive with default installation path command = which("nvidia-smi") if command is None: command = "%s\\Program Files\\NVIDIA Corporation\\NVSMI\\nvidia-smi.exe" % os.environ["systemdrive"] else: command = "nvidia-smi" return command def get_gpu_info(): """ Gets GPU count and names using `nvidia-smi` instead of torch to not initialize CUDA. Largely based on the `gputil` library. """ # Returns as list of `n` GPUs and their names output = subprocess.check_output( [_nvidia_smi(), "--query-gpu=count,name", "--format=csv,noheader"], universal_newlines=True ) output = output.strip() gpus = output.split(os.linesep) # Get names from output gpu_count = len(gpus) gpu_names = [gpu.split(",")[1].strip() for gpu in gpus] return gpu_names, gpu_count def get_driver_version(): """ Returns the driver version In the case of multiple GPUs, will return the first. """ output = subprocess.check_output( [_nvidia_smi(), "--query-gpu=driver_version", "--format=csv,noheader"], universal_newlines=True ) output = output.strip() return output.split(os.linesep)[0] def check_cuda_p2p_ib_support(): """ Checks if the devices being used have issues with P2P and IB communications, namely any consumer GPU hardware after the 3090. Noteably uses `nvidia-smi` instead of torch to not initialize CUDA. """ try: device_names, device_count = get_gpu_info() # As new consumer GPUs get released, add them to `unsupported_devices`` unsupported_devices = {"RTX 40"} if device_count > 1: if any( unsupported_device in device_name for device_name in device_names for unsupported_device in unsupported_devices ): # Check if they have the right driver version acceptable_driver_version = "550.40.07" current_driver_version = get_driver_version() if parse(current_driver_version) < parse(acceptable_driver_version): return False return True except Exception: pass return True def check_fp8_capability(): """ Checks if all the current GPUs available support FP8. Notably must initialize `torch.cuda` to check. """ cuda_device_capacity = torch.cuda.get_device_capability() return cuda_device_capacity >= (8, 9) @dataclass class CPUInformation: """ Stores information about the CPU in a distributed environment. It contains the following attributes: - rank: The rank of the current process. - world_size: The total number of processes in the world. - local_rank: The rank of the current process on the local node. - local_world_size: The total number of processes on the local node. """ rank: int = field(default=0, metadata={"help": "The rank of the current process."}) world_size: int = field(default=1, metadata={"help": "The total number of processes in the world."}) local_rank: int = field(default=0, metadata={"help": "The rank of the current process on the local node."}) local_world_size: int = field(default=1, metadata={"help": "The total number of processes on the local node."}) def get_cpu_distributed_information() -> CPUInformation: """ Returns various information about the environment in relation to CPU distributed training as a `CPUInformation` dataclass. 
""" information = {} information["rank"] = get_int_from_env(["RANK", "PMI_RANK", "OMPI_COMM_WORLD_RANK", "MV2_COMM_WORLD_RANK"], 0) information["world_size"] = get_int_from_env( ["WORLD_SIZE", "PMI_SIZE", "OMPI_COMM_WORLD_SIZE", "MV2_COMM_WORLD_SIZE"], 1 ) information["local_rank"] = get_int_from_env( ["LOCAL_RANK", "MPI_LOCALRANKID", "OMPI_COMM_WORLD_LOCAL_RANK", "MV2_COMM_WORLD_LOCAL_RANK"], 0 ) information["local_world_size"] = get_int_from_env( ["LOCAL_WORLD_SIZE", "MPI_LOCALNRANKS", "OMPI_COMM_WORLD_LOCAL_SIZE", "MV2_COMM_WORLD_LOCAL_SIZE"], 1, ) return CPUInformation(**information) def override_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None: """ Overrides whatever NUMA affinity is set for the current process. This is very taxing and requires recalculating the affinity to set, ideally you should use `utils.environment.set_numa_affinity` instead. Args: local_process_index (int): The index of the current process on the current server. verbose (bool, *optional*): Whether to log out the assignment of each CPU. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True. """ if verbose is None: verbose = parse_flag_from_env("ACCELERATE_DEBUG_MODE", False) if torch.cuda.is_available(): from accelerate.utils import is_pynvml_available if not is_pynvml_available(): raise ImportError( "To set CPU affinity on CUDA GPUs the `pynvml` package must be available. (`pip install pynvml`)" ) import pynvml as nvml # The below code is based on https://github.com/NVIDIA/DeepLearningExamples/blob/master/TensorFlow2/LanguageModeling/BERT/gpu_affinity.py nvml.nvmlInit() num_elements = math.ceil(os.cpu_count() / 64) handle = nvml.nvmlDeviceGetHandleByIndex(local_process_index) affinity_string = "" for j in nvml.nvmlDeviceGetCpuAffinity(handle, num_elements): # assume nvml returns list of 64 bit ints affinity_string = f"{j:064b}{affinity_string}" affinity_list = [int(x) for x in affinity_string] affinity_list.reverse() # so core 0 is the 0th element affinity_to_set = [i for i, e in enumerate(affinity_list) if e != 0] os.sched_setaffinity(0, affinity_to_set) if verbose: cpu_cores = os.sched_getaffinity(0) logger.info(f"Assigning {len(cpu_cores)} cpu cores to process {local_process_index}: {cpu_cores}") @lru_cache def set_numa_affinity(local_process_index: int, verbose: Optional[bool] = None) -> None: """ Assigns the current process to a specific NUMA node. Ideally most efficient when having at least 2 cpus per node. This result is cached between calls. If you want to override it, please use `accelerate.utils.environment.override_numa_afifnity`. Args: local_process_index (int): The index of the current process on the current server. verbose (bool, *optional*): Whether to print the new cpu cores assignment for each process. If `ACCELERATE_DEBUG_MODE` is enabled, will default to True. """ override_numa_affinity(local_process_index=local_process_index, verbose=verbose)
accelerate/src/accelerate/utils/environment.py
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "weight_decay": "auto", "torch_adam": true, "adam_w_mode": true } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": "auto", "contiguous_gradients": true }, "gradient_accumulation_steps": 1, "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
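For context, a DeepSpeed JSON file like this is typically handed to the launcher; a sketch assuming the launcher's DeepSpeed flags (`--use_deepspeed`, `--deepspeed_config_file`) with an illustrative script path (the `"auto"` entries are resolved by the integration at `prepare` time):

```bash
# Launch a script under DeepSpeed ZeRO-2 using the JSON config above
accelerate launch --use_deepspeed \
    --deepspeed_config_file tests/deepspeed/ds_config_zero2.json \
    ./examples/nlp_example.py
```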
accelerate/tests/deepspeed/ds_config_zero2.json
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import pytest import torch from parameterized import parameterized from torch.utils.data import BatchSampler, DataLoader, IterableDataset from accelerate import Accelerator, PartialState from accelerate.data_loader import ( BatchSamplerShard, DataLoaderDispatcher, DataLoaderShard, DataLoaderStateMixin, IterableDatasetShard, SkipBatchSampler, SkipDataLoader, prepare_data_loader, skip_first_batches, ) from accelerate.state import GradientState from accelerate.test_utils.testing import require_torchdata_stateful_dataloader from accelerate.utils import is_torchdata_stateful_dataloader_available if is_torchdata_stateful_dataloader_available(): from torchdata.stateful_dataloader import ( StatefulDataLoader, ) def parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = f"num_workers_{param.args[0]}" return f"{func.__name__}_{param_based_name}" class RandomIterableDataset(IterableDataset): # For testing, an iterable dataset of random length def __init__(self, p_stop=0.01, max_length=1000): self.p_stop = p_stop self.max_length = max_length def __iter__(self): count = 0 stop = False while not stop and count < self.max_length: yield count count += 1 stop = random.random() < self.p_stop class SimpleIterableDataset(IterableDataset): def __init__(self, num_samples=1000): self.num_samples = num_samples def __iter__(self): for _ in range(self.num_samples): yield torch.rand(1) def __len__(self): return self.num_samples def set_epoch(self, epoch): self.epoch = epoch class DataLoaderTester(unittest.TestCase): def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True): batch_sampler_shards = [ BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches) for i in range(2) ] batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards] if not split_batches: assert [len(shard) for shard in batch_sampler_shards] == [len(e) for e in expected] assert batch_sampler_lists == expected def test_batch_sampler_shards_with_no_splits(self): # Check the shards when the dataset is a round multiple of total batch size. batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is a round multiple of batch size but not total batch size. 
batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]], ] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected) # Check the shards when the dataset is very small. batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False) expected = [[[0, 1, 0]], [[1, 0, 1]]] self.check_batch_sampler_shards(batch_sampler, expected) batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected) def test_batch_sampler_shards_with_splits(self): # Check the shards when the dataset is a round multiple of batch size. batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is not a round multiple of batch size. batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is not a round multiple of batch size or num_processes. 
batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) # Check the shards when the dataset is very small. batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False) expected = [[[0, 1]], [[0, 1]]] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True) def test_batch_sampler_shards_with_no_splits_no_even(self): # Check the shards when the dataset is a round multiple of total batch size. batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is a round multiple of batch size but not total batch size. batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size but has a multiple of # num_processes batch. batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]], [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of # num_processes batch. batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True) expected = [ [[0, 1, 2], [6, 7, 8], [12, 13, 14]], [[3, 4, 5], [9, 10, 11], [15, 16, 17]], ] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) # Check the shards when the dataset is very small. 
batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False) expected = [[[0, 1]], []] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False) def test_batch_sampler_shards_with_splits_no_even(self): # Check the shards when the dataset is a round multiple of batch size. batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True) # Expected shouldn't change self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size. batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is not a round multiple of batch size or num_processes. batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True) expected = [ [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]], [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]], ] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) # Check the shards when the dataset is very small. 
batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False) expected = [[[0, 1]], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True) expected = [[], []] self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False) def test_batch_sampler_with_varying_batch_size(self): batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]] batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)] assert len(batch_sampler_shards[0]) == 3 assert len(batch_sampler_shards[1]) == 2 assert list(batch_sampler_shards[0]) == [[0, 1, 2], [5, 6, 7, 8], [12, 13]] assert list(batch_sampler_shards[1]) == [[3, 4], [9, 10, 11]] def check_iterable_dataset_shards( self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False ): random.seed(seed) reference = list(dataset) iterable_dataset_shards = [ IterableDatasetShard( dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, ) for i in range(num_processes) ] iterable_dataset_lists = [] for iterable_dataset_shard in iterable_dataset_shards: # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results. random.seed(seed) iterable_dataset_lists.append(list(iterable_dataset_shard)) shard_batch_size = batch_size // num_processes if split_batches else batch_size # All iterable dataset shard should have the same length, a round multiple of shard_batch_size first_list = iterable_dataset_lists[0] for l in iterable_dataset_lists[1:]: assert len(l) == len(first_list) assert (len(l) % shard_batch_size) == 0 observed = [] for idx in range(0, len(first_list), shard_batch_size): for l in iterable_dataset_lists: observed += l[idx : idx + shard_batch_size] if not drop_last: while len(reference) < len(observed): reference += reference assert observed == reference[: len(observed)] def test_iterable_dataset_shard(self): seed = 42 dataset = RandomIterableDataset() self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True) # Edge case with a very small dataset dataset = RandomIterableDataset(max_length=2) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True) self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True) def test_iterable_dataset_using_none_batch_size(self): dataset = SimpleIterableDataset(100) dataloader = DataLoader(dataset, batch_size=None) dataloader = prepare_data_loader(dataloader) for d in dataloader: assert isinstance(d, torch.Tensor) def test_skip_batch_sampler(self): batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False) new_batch_sampler = SkipBatchSampler(batch_sampler, 2) assert list(new_batch_sampler) == [[8, 9, 10, 11], [12, 13, 14, 15]] def 
test_dataloader_inheritance(self): """ `DataLoaderAdapter`'s parent classes are dynamically constructed, assert that subclasses of DataLoaderAdapter are instances of DataLoader and DataLoaderStateMixin. """ skip_dl = SkipDataLoader(range(16), batch_size=4, skip_batches=2) dl_shard = DataLoaderShard(range(16), batch_size=4) dl_dispatcher = DataLoaderDispatcher(range(16), batch_size=4) assert isinstance(skip_dl, DataLoader) assert isinstance(dl_shard, DataLoader) assert isinstance(dl_dispatcher, DataLoader) assert isinstance(dl_shard, DataLoaderStateMixin) assert isinstance(dl_dispatcher, DataLoaderStateMixin) assert isinstance(skip_dl.base_dataloader, DataLoader) assert isinstance(dl_shard.base_dataloader, DataLoader) assert isinstance(dl_dispatcher.base_dataloader, DataLoader) with pytest.raises(AttributeError): _ = DataLoaderShard.base_dataloader def test_skip_data_loader(self): dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2) assert [t.tolist() for t in dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]] def test_skip_first_batches(self): dataloader = DataLoader(list(range(16)), batch_size=4) new_dataloader = skip_first_batches(dataloader, num_batches=2) assert [t.tolist() for t in new_dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]] def test_end_of_dataloader(self): dataloader = DataLoaderShard(list(range(16)), batch_size=4) for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) def test_end_of_dataloader_dispatcher(self): dataloader = DataLoaderDispatcher(range(16), batch_size=4) for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) class StatefulDataLoaderTester(unittest.TestCase): @require_torchdata_stateful_dataloader def test_skip_data_loader(self): dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2, use_stateful_dataloader=True) assert isinstance(dataloader, StatefulDataLoader) assert [t.tolist() for t in dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]] @require_torchdata_stateful_dataloader def test_skip_first_batches(self): dataloader = StatefulDataLoader(list(range(16)), batch_size=4) new_dataloader = skip_first_batches(dataloader, num_batches=2) assert isinstance(new_dataloader, StatefulDataLoader) assert [t.tolist() for t in new_dataloader] == [[8, 9, 10, 11], [12, 13, 14, 15]] @require_torchdata_stateful_dataloader def test_end_of_dataloader(self): dataloader = DataLoaderShard(list(range(16)), batch_size=4, use_stateful_dataloader=True) assert dataloader.use_stateful_dataloader assert isinstance(dataloader, StatefulDataLoader) for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) @require_torchdata_stateful_dataloader def test_end_of_dataloader_dispatcher(self): dataloader = DataLoaderDispatcher(range(16), batch_size=4, use_stateful_dataloader=True) assert isinstance(dataloader, StatefulDataLoader) for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) # Test it also works on the second iteration for idx, _ in enumerate(dataloader): assert dataloader.end_of_dataloader == (idx == 3) @parameterized.expand([0, 2], 
name_func=parameterized_custom_name_func) @require_torchdata_stateful_dataloader def test_dataloader_state_dict(self, num_workers): """ Test that saving a stateful dataloader's state, then loading it back, gives the same results. """ dataset = list(range(16)) dataloader = DataLoaderShard(dataset, batch_size=4, use_stateful_dataloader=True, num_workers=num_workers) assert dataloader.use_stateful_dataloader assert isinstance(dataloader, StatefulDataLoader) vals = [] for idx, val in enumerate(dataloader): vals.append(val) if idx == 1: sd = dataloader.state_dict() assert len(vals) == 4 dataloader2 = DataLoaderShard(dataset, batch_size=4, use_stateful_dataloader=True, num_workers=num_workers) dataloader2.load_state_dict(sd) data1 = vals[2:] data2 = list(dataloader2) assert len(data1) == len(data2) for d1, d2 in zip(data1, data2): assert torch.allclose(d1, d2) @parameterized.expand([0, 2], name_func=parameterized_custom_name_func) @require_torchdata_stateful_dataloader def test_dataloader_dispatcher_state_dict(self, num_workers): """ Test that saving a stateful dataloader's state, then loading it back, gives the same results. """ dataset = list(range(16)) dataloader = DataLoaderDispatcher(dataset, batch_size=4, use_stateful_dataloader=True, num_workers=num_workers) assert dataloader.use_stateful_dataloader assert isinstance(dataloader, StatefulDataLoader) vals = [] for idx, val in enumerate(dataloader): vals.append(val) if idx == 1: sd = dataloader.state_dict() assert len(vals) == 4 dataloader2 = DataLoaderDispatcher( dataset, batch_size=4, use_stateful_dataloader=True, num_workers=num_workers ) dataloader2.load_state_dict(sd) data1 = vals[2:] data2 = list(dataloader2) assert len(data1) == len(data2) for d1, d2 in zip(data1, data2): assert torch.allclose(d1, d2) @require_torchdata_stateful_dataloader def test_dataloader_inheritance(self): """ `DataLoaderAdapter`'s parent classes are dynamically constructed, assert that if use_stateful_dataloader=True, subclasses of DataLoaderAdapter are instances of StatefulDataLoader and DataLoaderStateMixin. """ skip_dl = SkipDataLoader(range(16), batch_size=4, skip_batches=2, use_stateful_dataloader=True) dl_shard = DataLoaderShard(range(16), batch_size=4, use_stateful_dataloader=True) dl_dispatcher = DataLoaderDispatcher(range(16), batch_size=4, use_stateful_dataloader=True) assert isinstance(skip_dl, StatefulDataLoader) assert isinstance(dl_shard, StatefulDataLoader) assert isinstance(dl_dispatcher, StatefulDataLoader) assert isinstance(dl_shard, DataLoaderStateMixin) assert isinstance(dl_dispatcher, DataLoaderStateMixin) assert isinstance(skip_dl.base_dataloader, StatefulDataLoader) assert isinstance(dl_shard.base_dataloader, StatefulDataLoader) assert isinstance(dl_dispatcher.base_dataloader, StatefulDataLoader) @parameterized.expand([0, 2], name_func=parameterized_custom_name_func) @require_torchdata_stateful_dataloader def test_stateful_dataloader_adapter_equivalent_to_torchdata_stateful_dataloader(self, num_workers): """ Assert that `state_dict()` and `load_state_dict()` for derived subclasses of `DataLoaderAdapter` produce the same behavior as `state_dict()` and `load_state_dict()` for `StatefulDataLoader`. 
""" dataset = list(range(64)) # Set the seed for reproducibility def g(): return torch.Generator().manual_seed(42) accelerator = Accelerator() stateful_dl = StatefulDataLoader(dataset, batch_size=4, num_workers=num_workers, generator=g()) skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dl_shard = DataLoaderShard( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dl_dispatcher = DataLoaderDispatcher( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dataloaders_under_test = [skip_dl, dl_shard, dl_dispatcher] num_batches_to_skip = 8 def get_first_n_batches(dl, n, device): """ Iterate over the first `n` batches of a dataloader then break, returning the batches in a list. """ batches = [] for idx, batch in enumerate(dl): if idx == n - 1: if hasattr(dl, "end"): dl.end() break batches.append(batch.to(device)) return batches # Iterate over all of the dataloaders identically, expect the same values expected_batches = get_first_n_batches(stateful_dl, num_batches_to_skip, accelerator.device) batches_from_dataloaders = [ get_first_n_batches(dl, num_batches_to_skip, accelerator.device) for dl in dataloaders_under_test ] for dl_batches in batches_from_dataloaders: for expected, actual in zip(expected_batches, dl_batches): assert torch.allclose(expected, actual) # The adapters should all produce the same state_dict as the reference stateful dataloader expected_state_dict = stateful_dl.state_dict() skip_dl_state_dict = skip_dl.state_dict() dl_shard_state_dict = dl_shard.state_dict() dl_dispatcher_state_dict = dl_dispatcher.state_dict() assert expected_state_dict == skip_dl_state_dict assert expected_state_dict == dl_shard_state_dict assert expected_state_dict == dl_dispatcher_state_dict # Load the state dict into new dataloaders manual_skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), skip_batches=num_batches_to_skip, use_stateful_dataloader=True, ) loaded_stateful_dl = StatefulDataLoader(dataset, batch_size=4, num_workers=num_workers, generator=g()) loaded_stateful_dl.load_state_dict(expected_state_dict) loaded_skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_skip_dl.load_state_dict(expected_state_dict) loaded_dl_shard = DataLoaderShard( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_dl_shard.load_state_dict(expected_state_dict) loaded_dl_dispatcher = DataLoaderDispatcher( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_dl_dispatcher.load_state_dict(expected_state_dict) # Continue the iteration, expecting identical behavior across the board def get_all_batches(dl, device): """ Iterate over all batches of a dataloader, returning (batches, num_batches_yielded) """ batches = [] num_batches_yielded = 0 for batch in dl: batches.append(batch.to(device)) num_batches_yielded += 1 return (batches, num_batches_yielded) expected_batch_results = get_all_batches(loaded_stateful_dl, accelerator.device) dataloader_batch_results = [ get_all_batches(dl, accelerator.device) for dl in [manual_skip_dl, loaded_skip_dl, loaded_dl_shard, loaded_dl_dispatcher] ] for dl_results in dataloader_batch_results: for expected, actual in zip(expected_batches, dl_batches): assert torch.allclose(expected[0], actual[0]) assert expected_batch_results[1] == 
dl_results[1] assert accelerator.gradient_state.active_dataloader is None @parameterized.expand([0, 2], name_func=parameterized_custom_name_func) @require_torchdata_stateful_dataloader def test_decoupled_stateful_dataloader_adapter_equivalent_to_torchdata_stateful_dataloader(self, num_workers): """ Assert that `state_dict()` and `load_state_dict()` for derived subclasses of `DataLoaderAdapter` produce the same behavior as `state_dict()` and `load_state_dict()` for `StatefulDataLoader` when *not* using Accelerator (and instead using the decoupled `PartialState` workflow). """ dataset = list(range(64)) # Set the seed for reproducibility def g(): return torch.Generator().manual_seed(42) state = PartialState() stateful_dl = StatefulDataLoader(dataset, batch_size=4, num_workers=num_workers, generator=g()) skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dl_shard = DataLoaderShard( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dl_dispatcher = DataLoaderDispatcher( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) dataloaders_under_test = [skip_dl, dl_shard, dl_dispatcher] num_batches_to_skip = 8 def get_first_n_batches(dl, n, device): """ Iterate over the first `n` batches of a dataloader then break, returning the batches in a list. """ batches = [] for idx, batch in enumerate(dl): if idx == n - 1: if hasattr(dl, "end"): dl.end() break batches.append(batch.to(device)) return batches # Iterate over all of the dataloaders identically, expect the same values expected_batches = get_first_n_batches(stateful_dl, num_batches_to_skip, state.device) batches_from_dataloaders = [ get_first_n_batches(dl, num_batches_to_skip, state.device) for dl in dataloaders_under_test ] for dl_batches in batches_from_dataloaders: for expected, actual in zip(expected_batches, dl_batches): assert torch.allclose(expected, actual) # The adapters should all produce the same state_dict as the reference stateful dataloader expected_state_dict = stateful_dl.state_dict() skip_dl_state_dict = skip_dl.state_dict() dl_shard_state_dict = dl_shard.state_dict() dl_dispatcher_state_dict = dl_dispatcher.state_dict() assert expected_state_dict == skip_dl_state_dict assert expected_state_dict == dl_shard_state_dict assert expected_state_dict == dl_dispatcher_state_dict # Load the state dict into new dataloaders manual_skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), skip_batches=num_batches_to_skip, use_stateful_dataloader=True, ) loaded_stateful_dl = StatefulDataLoader(dataset, batch_size=4, num_workers=num_workers, generator=g()) loaded_stateful_dl.load_state_dict(expected_state_dict) loaded_skip_dl = SkipDataLoader( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_skip_dl.load_state_dict(expected_state_dict) loaded_dl_shard = DataLoaderShard( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_dl_shard.load_state_dict(expected_state_dict) loaded_dl_dispatcher = DataLoaderDispatcher( dataset, batch_size=4, num_workers=num_workers, generator=g(), use_stateful_dataloader=True ) loaded_dl_dispatcher.load_state_dict(expected_state_dict) # Continue the iteration, expecting identical behavior across the board def get_all_batches(dl, device): """ Iterate over all batches of a dataloader, returning (batches, num_batches_yielded) """ batches = [] 
num_batches_yielded = 0 for batch in dl: batches.append(batch.to(device)) num_batches_yielded += 1 return (batches, num_batches_yielded) expected_batch_results = get_all_batches(loaded_stateful_dl, state.device) dataloader_batch_results = [ get_all_batches(dl, state.device) for dl in [manual_skip_dl, loaded_skip_dl, loaded_dl_shard, loaded_dl_dispatcher] ] for dl_results in dataloader_batch_results: for expected, actual in zip(expected_batch_results[0], dl_results[0]): assert torch.allclose(expected, actual) assert expected_batch_results[1] == dl_results[1] # Using the decoupled (`PartialState`) workflow, GradientState should be automatically initialized (with # default parameters) by `DataLoaderDispatcher` assert GradientState._shared_state != {}, "GradientState should already be initialized!" gradient_state = GradientState() assert gradient_state.active_dataloader is None
accelerate/tests/test_data_loader.py/0
{ "file_path": "accelerate/tests/test_data_loader.py", "repo_id": "accelerate", "token_count": 16443 }
9
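The tests above revolve around `torchdata`'s `StatefulDataLoader` being checkpointable via `state_dict()`/`load_state_dict()`. A minimal standalone sketch of that save-and-resume pattern (assuming `torchdata` is installed; the variable names are illustrative and not part of the test suite):

# Save a stateful dataloader's progress mid-epoch, then resume from it.
# Assumes `pip install torchdata`; names below are illustrative.
from torchdata.stateful_dataloader import StatefulDataLoader

dataset = list(range(16))
dl = StatefulDataLoader(dataset, batch_size=4)

seen = []
for idx, batch in enumerate(dl):
    seen.append(batch)
    if idx == 1:
        checkpoint = dl.state_dict()  # progress after two batches

# A fresh dataloader picks up where the checkpoint left off instead of batch 0.
resumed = StatefulDataLoader(dataset, batch_size=4)
resumed.load_state_dict(checkpoint)
remaining = list(resumed)
assert len(remaining) == 2  # the two batches that followed the checkpoint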
# Welcome to the RLHF Handbook! Stay tuned for more details 🤗
alignment-handbook/chapters/en/chapter0/introduction.mdx/0
{ "file_path": "alignment-handbook/chapters/en/chapter0/introduction.mdx", "repo_id": "alignment-handbook", "token_count": 18 }
10
# Model arguments model_name_or_path: alignment-handbook/zephyr-7b-sft-full torch_dtype: null # Data training arguments dataset_mixer: HuggingFaceH4/ultrafeedback_binarized: 1.0 dataset_splits: - train_prefs - test_prefs preprocessing_num_workers: 12 # Training arguments with sensible defaults bf16: true beta: 0.01 loss_type: sigmoid do_eval: true eval_strategy: steps eval_steps: 100 gradient_accumulation_steps: 2 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: False hub_model_id: zephyr-7b-align-scan hub_model_revision: dpo-beta-0.01 learning_rate: 5.0e-7 logging_steps: 10 lr_scheduler_type: cosine max_prompt_length: 512 num_train_epochs: 1 optim: adamw_torch output_dir: data/zephyr-7b-align-scan-dpo-beta-0.01 per_device_train_batch_size: 8 per_device_eval_batch_size: 8 save_strategy: "steps" save_steps: 100 save_total_limit: 1 seed: 42 warmup_ratio: 0.1
alignment-handbook/recipes/pref_align_scan/dpo/config_zephyr.yaml/0
{ "file_path": "alignment-handbook/recipes/pref_align_scan/dpo/config_zephyr.yaml", "repo_id": "alignment-handbook", "token_count": 358 }
11
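For recipes like the one above, the effective global batch size is the product of per_device_train_batch_size, gradient_accumulation_steps, and the number of processes. A quick sanity check, where the 8-GPU count is a hypothetical assumption rather than part of the recipe:

# Effective global batch size implied by the config above.
# num_processes = 8 is a hypothetical assumption for illustration.
per_device_train_batch_size = 8
gradient_accumulation_steps = 2
num_processes = 8
print(per_device_train_batch_size * gradient_accumulation_steps * num_processes)  # 128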
# Model arguments model_name_or_path: google/gemma-7b model_revision: main tokenizer_name_or_path: philschmid/gemma-tokenizer-chatml # Custom tokenizer with <|im_start|> and <|im_end|> tokens torch_dtype: bfloat16 attn_implementation: flash_attention_2 # Data training arguments dataset_mixer: HuggingFaceH4/deita-10k-v0-sft: 1.0 dataset_splits: - train_sft - test_sft preprocessing_num_workers: 12 # SFT trainer config bf16: true dataset_kwargs: add_special_tokens: false # We already wrap <bos> and <eos> in the chat template append_concat_token: false # No need to add <eos> across samples do_eval: true eval_strategy: epoch gradient_accumulation_steps: 4 gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: false hub_model_id: zephyr-7b-gemma-sft hub_strategy: every_save learning_rate: 2.0e-05 log_level: info logging_steps: 5 logging_strategy: steps lr_scheduler_type: cosine max_seq_length: 2048 max_steps: -1 num_train_epochs: 3 output_dir: data/zephyr-7b-gemma-sft overwrite_output_dir: true per_device_eval_batch_size: 4 per_device_train_batch_size: 4 push_to_hub: true remove_unused_columns: true report_to: - tensorboard - wandb save_strategy: "no" seed: 42 warmup_ratio: 0.1
alignment-handbook/recipes/zephyr-7b-gemma/sft/config_full.yaml/0
{ "file_path": "alignment-handbook/recipes/zephyr-7b-gemma/sft/config_full.yaml", "repo_id": "alignment-handbook", "token_count": 481 }
12
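The `dataset_kwargs` in the recipe above disable `add_special_tokens` because the ChatML template already emits `<bos>`/`<eos>`. A sketch of the duplicated-BOS problem this avoids (the exact behavior depends on the tokenizer's BOS configuration, and the message content is illustrative):

# Why add_special_tokens is set to false above: the chat template already
# emits <bos>, so tokenizing its output with add_special_tokens=True would
# typically prepend a second <bos>.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("philschmid/gemma-tokenizer-chatml")
text = tok.apply_chat_template([{"role": "user", "content": "hi"}], tokenize=False)
with_special = tok(text, add_special_tokens=True).input_ids
without_special = tok(text, add_special_tokens=False).input_ids
print(with_special[:2], without_special[:2])  # expect a duplicated BOS in the first list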
# Model arguments model_name_or_path: mistralai/Mistral-7B-v0.1 model_revision: main torch_dtype: bfloat16 attn_implementation: flash_attention_2 # Data training arguments dataset_mixer: HuggingFaceH4/ultrachat_200k: 1.0 dataset_splits: - train_sft - test_sft preprocessing_num_workers: 12 # SFT trainer config bf16: true do_eval: true eval_strategy: epoch gradient_accumulation_steps: 2 gradient_checkpointing: true hub_model_id: zephyr-7b-sft-full hub_strategy: every_save learning_rate: 2.0e-05 log_level: info logging_steps: 5 logging_strategy: steps lr_scheduler_type: cosine max_seq_length: 2048 max_steps: -1 num_train_epochs: 1 output_dir: data/zephyr-7b-sft-full overwrite_output_dir: true per_device_eval_batch_size: 16 per_device_train_batch_size: 32 push_to_hub: true remove_unused_columns: true report_to: - tensorboard save_strategy: "no" save_total_limit: null seed: 42
alignment-handbook/tests/fixtures/config_sft_full.yaml/0
{ "file_path": "alignment-handbook/tests/fixtures/config_sft_full.yaml", "repo_id": "alignment-handbook", "token_count": 358 }
13
.PHONY: clean-ptx clean test clean-ptx: find target -name "*.ptx" -type f -delete echo "" > candle-kernels/src/lib.rs touch candle-kernels/build.rs touch candle-examples/build.rs touch candle-flash-attn/build.rs clean: cargo clean test: cargo test all: test
candle/Makefile/0
{ "file_path": "candle/Makefile", "repo_id": "candle", "token_count": 107 }
14
[package] name = "candle-core" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } byteorder = { workspace = true } candle-kernels = { workspace = true, optional = true } candle-metal-kernels = { workspace = true, optional = true } metal = { workspace = true, optional = true} cudarc = { workspace = true, optional = true } gemm = { workspace = true } half = { workspace = true } intel-mkl-src = { workspace = true, optional = true } libc = { workspace = true, optional = true } memmap2 = { workspace = true } num-traits = { workspace = true } num_cpus = { workspace = true } rand = { workspace = true } rand_distr = { workspace = true } rayon = { workspace = true } safetensors = { workspace = true } thiserror = { workspace = true } yoke = { workspace = true } zip = { workspace = true } [dev-dependencies] anyhow = { workspace = true } clap = { workspace = true } criterion = { workspace = true } [features] default = [] cuda = ["cudarc", "dep:candle-kernels"] cudnn = ["cuda", "cudarc/cudnn"] mkl = ["dep:libc", "dep:intel-mkl-src"] accelerate = ["dep:libc", "dep:accelerate-src"] metal = ["dep:metal", "dep:candle-metal-kernels"] [[bench]] name = "bench_main" harness = false [[example]] name = "metal_basics" required-features = ["metal"]
candle/candle-core/Cargo.toml/0
{ "file_path": "candle/candle-core/Cargo.toml", "repo_id": "candle", "token_count": 490 }
15
#![allow(dead_code)] use libc::{c_char, c_double, c_float, c_int, c_long, c_ulong}; mod ffi { use super::*; extern "C" { // It would be nice to be able to switch to the NEWLAPACK version of the function but this // seems to trigger some link error. Available function names can be seen here: // /Library/Developer/CommandLineTools/SDKs/MacOSX13.3.sdk/System/Library/Frameworks/Accelerate.framework/Versions/A/Accelerate.tbd #[link_name = "sgemm_"] pub fn sgemm_ffi( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_float, a: *const c_float, lda: *const c_int, b: *const c_float, ldb: *const c_int, beta: *const c_float, c: *mut c_float, ldc: *const c_int, ); #[link_name = "dgemm_"] pub fn dgemm_ffi( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_double, a: *const c_double, lda: *const c_int, b: *const c_double, ldb: *const c_int, beta: *const c_double, c: *mut c_double, ldc: *const c_int, ); pub fn vvexpf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvexp(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvsqrtf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvsqrt(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvsinf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvsin(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvcosf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvcos(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvlogf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvlog(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvtanhf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvtanh(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vDSP_vaddD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vadd( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vsubD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vsub( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vmulD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vmul( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vdivD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vdiv( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vminD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vmin( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vmaxD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vmax( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); } } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn sgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f32, a: &[f32], lda: i32, 
b: &[f32], ldb: i32, beta: f32, c: &mut [f32], ldc: i32, ) { ffi::sgemm_ffi( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn dgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f64, a: &[f64], lda: i32, b: &[f64], ldb: i32, beta: f64, c: &mut [f64], ldc: i32, ) { ffi::dgemm_ffi( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[inline] pub fn vs_exp(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvexpf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_exp(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvexp(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_sqrt(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvsqrtf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_sqrt(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvsqrt(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_sin(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvsinf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_sin(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvsin(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_cos(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvcosf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_cos(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvcos(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_tanh(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvtanhf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_tanh(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvtanh(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_ln(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvlogf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_ln(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvlog(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_sqr(a: &[f32], y: 
&mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } y.iter_mut().zip(a.iter()).for_each(|(y, a)| *y = *a * *a) } #[inline] pub fn vd_sqr(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } y.iter_mut().zip(a.iter()).for_each(|(y, a)| *y = *a * *a) } #[inline] pub fn vs_tanh_inplace(y: &mut [f32]) { unsafe { ffi::vvtanhf(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) } } #[inline] pub fn vd_tanh_inplace(y: &mut [f64]) { unsafe { ffi::vvtanh(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) } } #[inline] pub fn vs_exp_inplace(y: &mut [f32]) { unsafe { ffi::vvexpf(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) } } #[inline] pub fn vd_exp_inplace(y: &mut [f64]) { unsafe { ffi::vvexp(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) } } #[inline] pub fn vs_gelu(vs: &[f32], ys: &mut [f32]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vs_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } #[inline] pub fn vd_gelu(vs: &[f64], ys: &mut [f64]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vd_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } #[inline] pub fn vs_silu(vs: &[f32], ys: &mut [f32]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = -v } vs_exp_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = v / (1.0 + *y) } } #[inline] pub fn vd_silu(vs: &[f64], ys: &mut [f64]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = -v } vd_exp_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = v / (1.0 + *y) } } macro_rules! binary_op { ($fn_name:ident, $ty:ty, $accelerate_name:ident) => { #[inline] pub fn $fn_name(a: &[$ty], b: &[$ty], y: &mut [$ty]) { let a_len = a.len(); let b_len = b.len(); let y_len = y.len(); if a_len != y_len || b_len != y_len { panic!( "{} a,b,y len mismatch {a_len} {b_len} {y_len}", stringify!($fn_name) ); } unsafe { // Weird quirk of accelerate, the rhs comes before the lhs. ffi::$accelerate_name( b.as_ptr(), 1, a.as_ptr(), 1, y.as_mut_ptr(), 1, a_len as u64, ) } } }; } binary_op!(vs_add, f32, vDSP_vadd); binary_op!(vd_add, f64, vDSP_vaddD); binary_op!(vs_sub, f32, vDSP_vsub); binary_op!(vd_sub, f64, vDSP_vsubD); binary_op!(vs_mul, f32, vDSP_vmul); binary_op!(vd_mul, f64, vDSP_vmulD); binary_op!(vs_div, f32, vDSP_vdiv); binary_op!(vd_div, f64, vDSP_vdivD); binary_op!(vs_max, f32, vDSP_vmax); binary_op!(vd_max, f64, vDSP_vmaxD); binary_op!(vs_min, f32, vDSP_vmin); binary_op!(vd_min, f64, vDSP_vminD);
candle/candle-core/src/accelerate.rs/0
{ "file_path": "candle/candle-core/src/accelerate.rs", "repo_id": "candle", "token_count": 7639 }
16
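The `vs_gelu`/`vd_gelu` routines above implement the tanh approximation of GELU in two vectorized passes: fill the output with sqrt(2/pi) * x * (1 + 0.044715 * x^2), apply tanh in place, then rescale as 0.5 * x * (1 + tanh(...)). The same formula in a NumPy sketch, for reference only:

# NumPy reference for the tanh-based GELU approximation used by vs_gelu/vd_gelu:
# gelu(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * x * (1 + 0.044715 * x^2)))
import numpy as np

def gelu_tanh(x: np.ndarray) -> np.ndarray:
    inner = np.sqrt(2.0 / np.pi) * x * (1.0 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + np.tanh(inner))

print(gelu_tanh(np.array([-2.0, -0.5, 0.0, 0.5, 2.0])))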
use crate::backend::{BackendDevice, BackendStorage}; use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Layout, Result, Shape, WithDType}; pub use candle_kernels as kernels; pub use cudarc; use cudarc::cublas::{Gemm, GemmConfig, StridedBatchedConfig}; use cudarc::driver::{ CudaSlice, DevicePtr, DeviceRepr, DeviceSlice, LaunchAsync, LaunchConfig, ValidAsZeroBits, }; use half::{bf16, f16}; #[cfg(feature = "cudnn")] pub mod cudnn; mod device; mod error; mod utils; pub use device::{CudaDevice, DeviceId}; pub use error::{CudaError, WrapErr}; pub use utils::{Map1, Map1Any, Map2, Map2Any, Map2InPlace, Map3, S}; pub enum SlicePtrOrNull<T> { Ptr(CudaSlice<T>), Null, } unsafe impl<T: DeviceRepr> DeviceRepr for &SlicePtrOrNull<T> { fn as_kernel_param(&self) -> *mut std::ffi::c_void { match self { SlicePtrOrNull::Ptr(slice) => slice.as_kernel_param(), SlicePtrOrNull::Null => 0usize.as_kernel_param(), } } } impl SlicePtrOrNull<usize> { pub fn params_from_layout(dev: &CudaDevice, l: &Layout) -> Result<Self> { let ds = if l.is_contiguous() { SlicePtrOrNull::Null } else { SlicePtrOrNull::Ptr(dev.htod_copy([l.dims(), l.stride()].concat()).w()?) }; Ok(ds) } } #[derive(Debug)] pub enum CudaStorageSlice { U8(CudaSlice<u8>), U32(CudaSlice<u32>), I64(CudaSlice<i64>), BF16(CudaSlice<bf16>), F16(CudaSlice<f16>), F32(CudaSlice<f32>), F64(CudaSlice<f64>), } struct Clone; impl Map1 for Clone { fn f<T: DeviceRepr>( &self, s: &CudaSlice<T>, _: &CudaDevice, _: &Layout, ) -> Result<CudaSlice<T>> { s.try_clone().w() } } pub fn kernel_name<T: WithDType>(root: &str) -> String { let dtype = T::DTYPE.as_str(); format!("{root}_{dtype}") } struct Affine(f64, f64); impl Map1 for Affine { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = SlicePtrOrNull::params_from_layout(dev, layout)?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("affine"), kernels::AFFINE)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( el, dims.len(), &ds, src, &out, T::from_f64(self.0), T::from_f64(self.1), ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Elu(f64); impl Map1 for Elu { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = SlicePtrOrNull::params_from_layout(dev, layout)?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("uelu"), kernels::UNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = (el, dims.len(), &ds, T::from_f64(self.0), src, &out); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Im2Col1D { l_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col1D { fn l_out(&self, l: usize) -> usize { (l + 2 * self.padding - self.dilation * (self.l_k - 1) - 1) / self.stride + 1 } } impl Map1 for Im2Col1D { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let l_out = self.l_out(dims[2]); let dst_el = dims[0] * l_out * dims[1] * self.l_k; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("im2col1d"), kernels::CONV)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(dst_el) }.w()?; let params = ( dst_el, l_out, self.l_k, self.stride, self.padding, self.dilation, &ds, src, &dst, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } #[allow(unused)] struct Im2Col { h_k: usize, w_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col { #[allow(unused)] fn hw_out(&self, h: usize, w: usize) -> (usize, usize) { let h_out = (h + 2 * self.padding - self.dilation * (self.h_k - 1) - 1) / self.stride + 1; let w_out = (w + 2 * self.padding - self.dilation * (self.w_k - 1) - 1) / self.stride + 1; (h_out, w_out) } } impl Map1 for Im2Col { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let (h_out, w_out) = self.hw_out(dims[2], dims[3]); let dst_el = dims[0] * h_out * w_out * dims[1] * self.h_k * self.w_k; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("im2col"), kernels::CONV)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(dst_el) }.w()?; let params = ( dst_el, h_out, w_out, self.h_k, self.w_k, self.stride, self.padding, self.dilation, &ds, src, &dst, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } struct Powf(f64); impl Map1 for Powf { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = SlicePtrOrNull::params_from_layout(dev, layout)?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("upowf"), kernels::UNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = (el, dims.len(), &ds, T::from_f64(self.0), src, &out); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct FastReduce<'a>(&'a [usize], ReduceOp); impl<'a> Map1Any for FastReduce<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, wrap: W, ) -> Result<S> { let src_stride = layout.stride(); let src_dims = layout.shape().dims(); let src_el: usize = src_dims.iter().product(); // Source dims and strides with the sum dims at the end. 
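// Note: the kept dimensions are listed first and the reduced ones last, so each of
// the `dst_el` outputs owns `el_to_sum_per_block` trailing source elements, which the
// fast_* reduce kernels traverse via the dims/strides copied to the device below.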
let mut dims = vec![]; let mut stride = vec![]; let mut dst_el: usize = 1; for (dim_idx, &d) in src_dims.iter().enumerate() { if !self.0.contains(&dim_idx) { dst_el *= d; dims.push(d); stride.push(src_stride[dim_idx]); } } for &dim_idx in self.0.iter() { dims.push(src_dims[dim_idx]); stride.push(src_stride[dim_idx]); } let el_to_sum_per_block = src_el / dst_el; // The reduction loop requires the shared array to be properly initialized and for // this we want the number of threads to be a power of two. let block_dim = usize::min(1024, el_to_sum_per_block).next_power_of_two(); let cfg = LaunchConfig { // TODO: Maybe use grid_y if the output is too large? // TODO: Specialized implementation when reducing on no or all dimensions or when // reducing only aggregate a small number of elements together. grid_dim: (dst_el as u32, 1, 1), block_dim: (block_dim as u32, 1, 1), shared_mem_bytes: 0, }; let ds = dev .htod_copy([dims.as_slice(), stride.as_slice()].concat()) .w()?; let src = &src.slice(layout.start_offset()..); let (name, check_empty, return_index) = match self.1 { ReduceOp::Sum => ("fast_sum", false, false), ReduceOp::Min => ("fast_min", true, false), ReduceOp::Max => ("fast_max", true, false), ReduceOp::ArgMin => ("fast_argmin", true, true), ReduceOp::ArgMax => ("fast_argmax", true, true), }; if check_empty && layout.shape().elem_count() == 0 { Err(crate::Error::EmptyTensor { op: "reduce" }.bt())? } let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::REDUCE)?; if return_index { // SAFETY: filled in by the follow up kernel. let out = unsafe { dev.alloc::<u32>(dst_el) }.w()?; let params = (src_el, el_to_sum_per_block, src_dims.len(), &ds, src, &out); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(S::U32(out)) } else { // SAFETY: filled in by the follow up kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let params = (src_el, el_to_sum_per_block, src_dims.len(), &ds, src, &out); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(wrap(out)) } } } impl<U: UnaryOpT> Map1 for U { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el_count as u32); let ds = SlicePtrOrNull::params_from_layout(dev, layout)?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>(U::KERNEL), kernels::UNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el_count) }.w()?; let params = (el_count, dims.len(), &ds, src, &out); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct IndexSelect<'a>(&'a CudaStorage, &'a Layout, usize); impl<'a> Map1 for IndexSelect<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, src_l: &Layout, ) -> Result<CudaSlice<T>> { let ids_l = &self.1; let (name, ids) = match &self.0.slice { CudaStorageSlice::U32(slice) => { ("is_u32", *slice.slice(ids_l.start_offset()..).device_ptr()) } CudaStorageSlice::U8(slice) => { ("is_u8", *slice.slice(ids_l.start_offset()..).device_ptr()) } CudaStorageSlice::I64(slice) => { ("is_i64", *slice.slice(ids_l.start_offset()..).device_ptr()) } _ => Err(CudaError::UnexpectedDType { msg: "index_select ids should be u8 or u32", expected: DType::U32, got: self.0.dtype(), }) .w()?, }; let ids_shape = ids_l.shape(); let ids_dims = ids_shape.dims(); let ds = dev.htod_copy([ids_dims, ids_l.stride()].concat()).w()?; let src = match src_l.contiguous_offsets() { Some((o1, o2)) => src.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "index-select" }.bt())?, }; let left_size: usize = src_l.dims()[..self.2].iter().product(); let right_size: usize = src_l.dims()[self.2 + 1..].iter().product(); let src_dim_size = src_l.dims()[self.2]; let ids_dim_size = ids_shape.elem_count(); let dst_el = ids_shape.elem_count() * left_size * right_size; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let params = ( dst_el, ids_dims.len(), &ds, ids, &src, &out, left_size, src_dim_size, ids_dim_size, right_size, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Gather<'a>(&'a CudaStorage, &'a Layout, usize); impl<'a> Map1 for Gather<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, src_l: &Layout, ) -> Result<CudaSlice<T>> { let ids = &self.0; let ids_l = &self.1; let dim = self.2; let (ids_o1, ids_o2) = match ids_l.contiguous_offsets() { Some(o12) => o12, None => Err(crate::Error::RequiresContiguous { op: "gather" }.bt())?, }; let (name, ids) = match &ids.slice { CudaStorageSlice::U32(slice) => { ("gather_u32", *slice.slice(ids_o1..ids_o2).device_ptr()) } CudaStorageSlice::U8(slice) => ("gather_u8", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::I64(slice) => { ("gather_i64", *slice.slice(ids_o1..ids_o2).device_ptr()) } _ => Err(CudaError::UnexpectedDType { msg: "gather ids should be u8/u32/i64", expected: DType::U32, got: ids.dtype(), })?, }; let el = ids_l.shape().elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let src = match src_l.contiguous_offsets() { Some((o1, o2)) => src.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "gather" }.bt())?, }; let left_sz: usize = src_l.dims()[..dim].iter().product(); let right_sz: usize = src_l.dims()[dim + 1..].iter().product(); let src_dim_sz = src_l.dims()[dim]; let ids_dim_sz = ids_l.dims()[dim]; let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( el, ids, &src, &out, left_sz, src_dim_sz, ids_dim_sz, right_sz, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct IndexAdd<'a>(&'a CudaStorage, &'a Layout, usize); impl<'a> Map2InPlace for IndexAdd<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, dst: &mut CudaSlice<T>, dst_shape: &Shape, src: &CudaSlice<T>, src_l: &Layout, dev: &CudaDevice, ) -> Result<()> { let ids = &self.0; let ids_l = &self.1; let dim = self.2; let (ids_o1, ids_o2) = match ids_l.contiguous_offsets() { Some(o12) => o12, None => Err(crate::Error::RequiresContiguous { op: "index-add" }.bt())?, }; let (name, ids) = match &ids.slice { CudaStorageSlice::U32(slice) => ("ia_u32", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::I64(slice) => ("ia_i64", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::U8(slice) => ("ia_u8", *slice.slice(ids_o1..ids_o2).device_ptr()), _ => Err(CudaError::UnexpectedDType { msg: "index-add ids should be u8/u32/i64", expected: DType::U32, got: ids.dtype(), })?, }; let src = match src_l.contiguous_offsets() { Some((o1, o2)) => src.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "index-add" }.bt())?, }; let left_sz: usize = src_l.dims()[..dim].iter().product(); let right_sz: usize = src_l.dims()[dim + 1..].iter().product(); let src_dim_sz = src_l.dims()[dim]; let dst_dim_sz = dst_shape.dims()[dim]; let ids_dim_sz = ids_l.dims()[0]; let cfg = LaunchConfig::for_num_elems((left_sz * right_sz) as u32); let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?; // SAFETY: Set later by running the kernel. let params = ( ids, ids_dim_sz, &src, dst, left_sz, src_dim_sz, dst_dim_sz, right_sz, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(()) } } struct ScatterAdd<'a>(&'a CudaStorage, &'a Layout, usize); impl<'a> Map2InPlace for ScatterAdd<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, dst: &mut CudaSlice<T>, dst_shape: &Shape, src: &CudaSlice<T>, src_l: &Layout, dev: &CudaDevice, ) -> Result<()> { let ids = &self.0; let ids_l = &self.1; let dim = self.2; let (ids_o1, ids_o2) = match ids_l.contiguous_offsets() { Some(o12) => o12, None => Err(crate::Error::RequiresContiguous { op: "scatter-add" }.bt())?, }; let (name, ids) = match &ids.slice { CudaStorageSlice::U32(slice) => ("sa_u32", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::I64(slice) => ("sa_i64", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::U8(slice) => ("sa_u8", *slice.slice(ids_o1..ids_o2).device_ptr()), _ => Err(CudaError::UnexpectedDType { msg: "scatter-add ids should be u8/u32/i64", expected: DType::U32, got: ids.dtype(), })?, }; let src = match src_l.contiguous_offsets() { Some((o1, o2)) => src.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "scatter-add" }.bt())?, }; let left_sz: usize = src_l.dims()[..dim].iter().product(); let right_sz: usize = src_l.dims()[dim + 1..].iter().product(); let src_dim_sz = src_l.dims()[dim]; let dst_dim_sz = dst_shape.dims()[dim]; let cfg = LaunchConfig::for_num_elems((left_sz * right_sz) as u32); let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?; // SAFETY: Set later by running the kernel. let params = (ids, &src, dst, left_sz, src_dim_sz, dst_dim_sz, right_sz); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(()) } } struct Conv1D<'a>(&'a crate::conv::ParamsConv1D); impl<'a> Map2 for Conv1D<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, inp_l: &Layout, k: &CudaSlice<T>, k_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { // Kernel shape: (c_out, c_in_k, k_size) // Input shape: (b_size, c_in, l_in) or (c_in, l_in) let p = &self.0; let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(k_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); let l_out = p.l_out(); let dst_el = p.c_out * l_out * p.b_size; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("conv1d"), kernels::CONV)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let ds = if dims.len() == 3 { [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else if dims.len() == 2 { [&[1], dims, &[1], inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else { crate::bail!("unexpected input shape for conv1d {dims:?}") }; let ds = dev.htod_copy(ds).w()?; let params = ( el, l_out, p.stride, p.padding, p.dilation, &ds, inp, k, &out, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Conv2D<'a>(&'a crate::conv::ParamsConv2D); impl<'a> Map2 for Conv2D<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, inp_l: &Layout, k: &CudaSlice<T>, k_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { // Kernel shape: (c_out, c_in_k, h_k, w_k) // Input shape: (b_size, c_in, h_in, w_in) let p = &self.0; let (out_w, out_h) = (p.out_w(), p.out_h()); let dst_el = p.c_out * out_w * out_h * p.b_size; let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(k_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("conv2d"), kernels::CONV)?; let ds = if dims.len() == 4 { [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else { crate::bail!("unexpected input shape for conv2d {dims:?}") }; let ds = dev.htod_copy(ds).w()?; let params = ( el, out_w, out_h, p.stride, p.padding, p.dilation, &ds, inp, k, &out, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Col2Im1D { stride: usize, } impl Map1 for Col2Im1D { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, col: &CudaSlice<T>, dev: &CudaDevice, l: &Layout, ) -> Result<CudaSlice<T>> { let (b_size, l_in, c_out, k_size) = l.shape().dims4()?; let stride = self.stride; let l_out = (l_in - 1) * stride + k_size; let dst_el = b_size * c_out * l_out; let mut im = unsafe { dev.alloc::<T>(dst_el) }.w()?; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let params = (dst_el, l_out, l_in, c_out, k_size, stride, col, &mut im); let func = dev.get_or_load_func(&kernel_name::<T>("col2im1d"), kernels::CONV)?; unsafe { func.launch(cfg, params) }.w()?; Ok(im) } } struct ConvTranspose1D<'a>(&'a crate::conv::ParamsConvTranspose1D); impl<'a> Map2 for ConvTranspose1D<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, inp_l: &Layout, k: &CudaSlice<T>, k_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { // Kernel shape: (c_in_k, c_out, l_k) // Input shape: (b_size, c_in, l_in) let p = &self.0; let l_out = p.l_out(); let dst_el = p.c_out * l_out * p.b_size; let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(k_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("conv_transpose1d"), kernels::CONV)?; let ds = if dims.len() == 3 { [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else { crate::bail!("unexpected input shape for conv_transpose1d {dims:?}") }; let ds = dev.htod_copy(ds).w()?; let params = ( el, l_out, p.stride, p.padding, p.output_padding, p.dilation, &ds, inp, k, &out, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct ConvTranspose2D<'a>(&'a crate::conv::ParamsConvTranspose2D); impl<'a> Map2 for ConvTranspose2D<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, inp_l: &Layout, k: &CudaSlice<T>, k_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { // Kernel shape: (c_in_k, c_out, h_k, w_k) // Input shape: (b_size, c_in, h_in, w_in) let p = &self.0; let (out_w, out_h) = (p.out_w(), p.out_h()); let dst_el = p.c_out * out_w * out_h * p.b_size; let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(k_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("conv_transpose2d"), kernels::CONV)?; let ds = if dims.len() == 4 { [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else { crate::bail!("unexpected input shape for conv_transpose2d {dims:?}") }; let ds = dev.htod_copy(ds).w()?; let params = ( el, out_w, out_h, p.stride, p.padding, p.output_padding, p.dilation, &ds, inp, k, &out, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } enum PoolOp { Max, Avg, } struct Pool2D { w_k: usize, h_k: usize, w_stride: usize, h_stride: usize, op: PoolOp, } impl Map1 for Pool2D { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, dev: &CudaDevice, inp_l: &Layout, ) -> Result<CudaSlice<T>> { // Input shape: (b_size, c, h, w) let inp = &inp.slice(inp_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let ds = if dims.len() == 4 { [dims, inp_l.stride()].concat() } else { crate::bail!("unexpected input shape for pool {dims:?}") }; let el = shape.elem_count(); let out_w = (dims[2] - self.w_k) / self.w_stride + 1; let out_h = (dims[3] - self.h_k) / self.h_stride + 1; let dst_el = out_w * out_h * dims[0] * dims[1]; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let kname = match self.op { PoolOp::Max => "max_pool2d", PoolOp::Avg => "avg_pool2d", }; let func = dev.get_or_load_func(&kernel_name::<T>(kname), kernels::CONV)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let ds = dev.htod_copy(ds).w()?; let params = ( el, self.w_k, self.h_k, self.w_stride, self.h_stride, &ds, inp, &out, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct UpsampleNearest2D(usize, usize); impl Map1 for UpsampleNearest2D { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, dev: &CudaDevice, inp_l: &Layout, ) -> Result<CudaSlice<T>> { // Input shape: (b_size, c, h, w) let inp = &inp.slice(inp_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let ds = if dims.len() == 4 { [dims, inp_l.stride()].concat() } else { crate::bail!("unexpected input shape for upsample {dims:?}") }; let (out_w, out_h) = (self.0, self.1); let dst_el = out_w * out_h * dims[0] * dims[1]; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("upsample_nearest2d"), kernels::CONV)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let ds = dev.htod_copy(ds).w()?; let scale_w = dims[2] as f64 / out_w as f64; let scale_h = dims[3] as f64 / out_h as f64; let params = (out_w, out_h, scale_w, scale_h, &ds, inp, &out); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct WhereCond<'a>(&'a CudaStorage, &'a Layout); impl<'a> Map2 for WhereCond<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, t: &CudaSlice<T>, layout_t: &Layout, f: &CudaSlice<T>, layout_f: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let ids_l = &self.1; let (ids, name) = match &self.0.slice { CudaStorageSlice::U8(slice) => { let ptr = *slice.slice(ids_l.start_offset()..).device_ptr(); (ptr, "where_u8") } CudaStorageSlice::U32(slice) => { let ptr = *slice.slice(ids_l.start_offset()..).device_ptr(); (ptr, "where_u32") } CudaStorageSlice::I64(slice) => { let ptr = *slice.slice(ids_l.start_offset()..).device_ptr(); (ptr, "where_i64") } _ => Err(CudaError::UnexpectedDType { msg: "where conditions should be u8/u32/i64", expected: DType::U32, got: self.0.dtype(), }) .w()?, }; let shape = ids_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = dev .htod_copy([dims, ids_l.stride(), layout_t.stride(), layout_f.stride()].concat()) .w()?; let t = &t.slice(layout_t.start_offset()..); let f = &f.slice(layout_f.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::TERNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = (el, dims.len(), &ds, ids, t, f, &out); // SAFETY: ffi unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } impl<U: crate::op::BinaryOpT> Map2 for U { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, lhs: &CudaSlice<T>, lhs_l: &Layout, rhs: &CudaSlice<T>, rhs_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let shape = lhs_l.shape(); let dims = shape.dims(); let elem_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(elem_count as u32); let dims_and_strides = if lhs_l.is_contiguous() && rhs_l.is_contiguous() { SlicePtrOrNull::Null } else { SlicePtrOrNull::Ptr( dev.htod_copy([dims, lhs_l.stride(), rhs_l.stride()].concat()) .w()?, ) }; let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>(U::KERNEL), kernels::BINARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(elem_count) }.w()?; let params = (elem_count, dims.len(), &dims_and_strides, lhs, rhs, &out); // SAFETY: ffi unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Cmp(CmpOp); impl Map2Any for Cmp { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, lhs: &CudaSlice<T>, lhs_l: &Layout, rhs: &CudaSlice<T>, rhs_l: &Layout, dev: &CudaDevice, ) -> Result<S> { let shape = lhs_l.shape(); let dims = shape.dims(); let elem_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(elem_count as u32); let dims_and_strides = if lhs_l.is_contiguous() && rhs_l.is_contiguous() { SlicePtrOrNull::Null } else { SlicePtrOrNull::Ptr( dev.htod_copy([dims, lhs_l.stride(), rhs_l.stride()].concat()) .w()?, ) }; let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let name = match self.0 { CmpOp::Eq => "eq", CmpOp::Ne => "ne", CmpOp::Lt => "lt", CmpOp::Le => "le", CmpOp::Gt => "gt", CmpOp::Ge => "ge", }; let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::BINARY)?; // SAFETY: Set later by running the kernel. 
let out = unsafe { dev.alloc::<u8>(elem_count) }.w()?; let params = (elem_count, dims.len(), &dims_and_strides, lhs, rhs, &out); // SAFETY: ffi unsafe { func.launch(cfg, params) }.w()?; Ok(S::U8(out)) } } fn slice_src_and_dst<'a, T>( src: &'a CudaSlice<T>, src_l: &Layout, dst: &'a mut CudaSlice<T>, dst_offset: usize, ) -> ( cudarc::driver::CudaView<'a, T>, cudarc::driver::CudaViewMut<'a, T>, ) { let src_offset = src_l.start_offset(); let to_copy = dst .len() .saturating_sub(dst_offset) .min(src.len().saturating_sub(src_offset)); let src = src.slice(src_offset..src_offset + to_copy); let dst = dst.slice_mut(dst_offset..dst_offset + to_copy); (src, dst) } #[derive(Debug)] pub struct CudaStorage { pub slice: CudaStorageSlice, pub device: CudaDevice, } pub trait CudaDType: Sized { fn as_cuda_slice(s: &CudaStorage) -> Result<&CudaSlice<Self>>; fn wrap_cuda_slice(s: CudaSlice<Self>, dev: CudaDevice) -> CudaStorage; } macro_rules! cuda_dtype { ($ty:ty, $dtype:ident) => { impl CudaDType for $ty { fn as_cuda_slice(s: &CudaStorage) -> Result<&CudaSlice<Self>> { match &s.slice { CudaStorageSlice::$dtype(data) => Ok(&data), _ => Err(crate::Error::UnexpectedDType { expected: DType::$dtype, got: s.dtype(), msg: "unexpected dtype", } .bt()), } } fn wrap_cuda_slice(slice: CudaSlice<Self>, device: CudaDevice) -> CudaStorage { let slice = CudaStorageSlice::$dtype(slice); CudaStorage { slice, device } } } }; } cuda_dtype!(u8, U8); cuda_dtype!(u32, U32); cuda_dtype!(i64, I64); cuda_dtype!(f16, F16); cuda_dtype!(bf16, BF16); cuda_dtype!(f32, F32); cuda_dtype!(f64, F64); impl CudaStorage { pub fn wrap_cuda_slice<T: CudaDType>(slice: CudaSlice<T>, device: CudaDevice) -> CudaStorage { T::wrap_cuda_slice(slice, device) } pub fn as_cuda_slice<T: CudaDType>(&self) -> Result<&CudaSlice<T>> { T::as_cuda_slice(self) } } fn gemm_config<T>( alpha: T, beta: T, (b, m, n, k): (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<StridedBatchedConfig<T>> { // https://docs.nvidia.com/cuda/cublas/index.html#cublas-t-gemm use cudarc::cublas::sys::cublasOperation_t; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; // The a tensor has dims batching, k, n (rhs) // We also allow for the case where the stride on the minor dimension is not as expected but // there is a single element. let (lda, transa) = if (rhs_m1 == 1 || n == 1) && (rhs_m2 == n || k == 1) { (n as i32, cublasOperation_t::CUBLAS_OP_N) } else if (rhs_m1 == k || n == 1) && (rhs_m2 == 1 || k == 1) { (k as i32, cublasOperation_t::CUBLAS_OP_T) } else { Err(CudaError::MatMulNonContiguous { lhs_stride: lhs_l.clone(), rhs_stride: rhs_l.clone(), mnk: (m, n, k), })? }; // The b tensor has dims batching, m, k (lhs) // We also allow for the case where the stride on the minor dimension is not as expected but // there is a single element. let (ldb, transb) = if (lhs_m1 == 1 || k == 1) && (lhs_m2 == k || m == 1) { (k as i32, cublasOperation_t::CUBLAS_OP_N) } else if (lhs_m1 == m || k == 1) && (lhs_m2 == 1 || m == 1) { (m as i32, cublasOperation_t::CUBLAS_OP_T) } else { Err(CudaError::MatMulNonContiguous { lhs_stride: lhs_l.clone(), rhs_stride: rhs_l.clone(), mnk: (m, n, k), })? 
}; // The setup below was copied from: // https://github.com/lebedov/scikit-cuda/blob/7e7300474286019c917a6c8a4bca59405c64fbce/tests/test_cublas.py#L531 let gemm = GemmConfig { alpha, beta, m: n as i32, n: m as i32, k: k as i32, lda, ldb, ldc: n as i32, transa, transb, }; let stride_b: usize = match lhs_stride[..lhs_stride.len() - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [_, stride] if lhs_l.dims()[0] == 1 => stride, [stride, _] if lhs_l.dims()[1] == 1 => stride, [stride] => stride, [] => m * k, _ => Err(CudaError::MatMulNonContiguous { lhs_stride: lhs_l.clone(), rhs_stride: rhs_l.clone(), mnk: (m, n, k), })?, }; let stride_a: usize = match rhs_stride[..rhs_stride.len() - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [_, stride] if rhs_l.dims()[0] == 1 => stride, [stride, _] if rhs_l.dims()[1] == 1 => stride, [stride] => stride, [] => n * k, _ => Err(CudaError::MatMulNonContiguous { lhs_stride: lhs_l.clone(), rhs_stride: rhs_l.clone(), mnk: (m, n, k), })?, }; Ok(StridedBatchedConfig { batch_size: b as i32, gemm, stride_a: stride_a as i64, stride_b: stride_b as i64, stride_c: (m * n) as i64, }) } impl BackendStorage for CudaStorage { type Device = CudaDevice; fn try_clone(&self, layout: &Layout) -> Result<Self> { let slice = Clone.map(&self.slice, self.device(), layout)?; let device = self.device.clone(); Ok(Self { slice, device }) } fn dtype(&self) -> DType { match self.slice { CudaStorageSlice::U8(_) => DType::U8, CudaStorageSlice::U32(_) => DType::U32, CudaStorageSlice::I64(_) => DType::I64, CudaStorageSlice::BF16(_) => DType::BF16, CudaStorageSlice::F16(_) => DType::F16, CudaStorageSlice::F32(_) => DType::F32, CudaStorageSlice::F64(_) => DType::F64, } } fn device(&self) -> &CudaDevice { &self.device } fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { let shape = layout.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let dev = self.device(); let ds = SlicePtrOrNull::params_from_layout(dev, layout)?; let start_o = layout.start_offset(); // This returns an i64 rather than a &i64, this is useful to get around some temporary // lifetime issue and is safe as long as self.slice does not go out of scope before inp // is used. 
let inp = match &self.slice { CudaStorageSlice::U8(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::U32(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::I64(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::BF16(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::F16(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::F32(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::F64(inp) => *inp.slice(start_o..).device_ptr(), }; let inp = &inp; let kernel_name = format!("cast_{}_{}", self.dtype().as_str(), dtype.as_str()); let func = dev.get_or_load_func(&kernel_name, kernels::CAST)?; let slice = match dtype { DType::U8 => { let out = unsafe { dev.alloc::<u8>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::U8(out) } DType::U32 => { let out = unsafe { dev.alloc::<u32>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::U32(out) } DType::I64 => { let out = unsafe { dev.alloc::<i64>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::I64(out) } DType::BF16 => { let out = unsafe { dev.alloc::<bf16>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::BF16(out) } DType::F16 => { let out = unsafe { dev.alloc::<f16>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F16(out) } DType::F32 => { let out = unsafe { dev.alloc::<f32>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F32(out) } DType::F64 => { let out = unsafe { dev.alloc::<f64>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F64(out) } }; Ok(Self { slice, device: dev.clone(), }) } fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { let device = self.device().clone(); let slice = Affine(mul, add).map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn powf(&self, layout: &Layout, e: f64) -> Result<Self> { let device = self.device().clone(); let slice = Powf(e).map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { let device = self.device().clone(); let slice = Elu(alpha).map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> { let device = self.device().clone(); let slice = FastReduce(sum_dims, op).map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn cmp(&self, op: CmpOp, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout) -> Result<Self> { let device = self.device().clone(); let slice = Cmp(op).map(&self.slice, lhs_l, &rhs.slice, rhs_l, &device)?; Ok(Self { slice, device }) } fn unary_impl<U: UnaryOpT>(&self, layout: &Layout) -> Result<Self> { let device = self.device().clone(); let slice = U::V.map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn binary_impl<B: BinaryOpT>( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { let device = self.device().clone(); let slice = B::V.map(&self.slice, lhs_l, &rhs.slice, rhs_l, &device)?; Ok(Self { slice, device }) } fn to_cpu_storage(&self) -> Result<CpuStorage> { match &self.slice { CudaStorageSlice::U8(slice) => { 
let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::U8(cpu_storage)) } CudaStorageSlice::U32(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::U32(cpu_storage)) } CudaStorageSlice::I64(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::I64(cpu_storage)) } CudaStorageSlice::BF16(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::BF16(cpu_storage)) } CudaStorageSlice::F16(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::F16(cpu_storage)) } CudaStorageSlice::F32(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::F32(cpu_storage)) } CudaStorageSlice::F64(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::F64(cpu_storage)) } } } fn where_cond( &self, layout: &Layout, t: &Self, t_l: &Layout, f: &Self, f_l: &Layout, ) -> Result<Self> { let device = self.device().clone(); let slice = WhereCond(self, layout).map(&t.slice, t_l, &f.slice, f_l, &device)?; Ok(Self { slice, device }) } fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { const USE_IM2COL_CONV1D: bool = true; let device = self.device().clone(); if !USE_IM2COL_CONV1D { let slice = Conv1D(params).map(&self.slice, l, &kernel.slice, kernel_l, &device)?; return Ok(Self { slice, device }); } let col = Im2Col1D { l_k: params.k_size, stride: params.stride, dilation: params.dilation, padding: params.padding, } .map(&self.slice, &device, l)?; let col = Self { slice: col, device }; let l_out = params.l_out(); let b = params.b_size; let n = params.c_out; let k = params.k_size * params.c_in; let m = l_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case, then matmul against the // contiguous copy (which starts at offset 0) rather than the strided original. let mut kernel_c = unsafe { self.device() .alloc_uninit(kernel_l.shape(), kernel.dtype())? }; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous((1, n, k)) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(&kernel_c, (b, m, n, k), &col_l, &kernel_l)? }; let res_l = Layout::contiguous((b, l_out, n)).transpose(1, 2)?; let mut res_t = unsafe { self.device().alloc_uninit(res_l.shape(), res.dtype())?
}; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { const USE_COL2IM_CONV1D_TR: bool = true; let device = self.device().clone(); let can_use_col2im = kernel_l.is_contiguous() && params.dilation == 1 && params.padding == 0 && params.output_padding == 0; let slice = if USE_COL2IM_CONV1D_TR && can_use_col2im { let (b_size, c_in, l_in) = l.shape().dims3()?; let (c_in2, c_out, k_size) = kernel_l.shape().dims3()?; if !kernel_l.is_contiguous() { crate::bail!( "convtr1d: the second argument (kernel) has to be contiguous {kernel_l:?}" ) } if c_in != c_in2 { crate::bail!( "convtr1d: shape mismatch on c_in {:?} {:?}", l.shape(), kernel_l.shape() ) } let col = { // This merges the last two dimensions of the kernel together. let kernel_l_mm = Layout::new( (b_size, c_in, k_size * c_out).into(), vec![0, k_size * c_out, 1], kernel_l.start_offset(), ); self.matmul( kernel, ( b_size, /* m */ l_in, /* n */ c_out * k_size, /* k */ c_in, ), &l.transpose(1, 2)?, &kernel_l_mm, )? }; let col_l = Layout::contiguous((b_size, l_in, c_out, k_size)); Col2Im1D { stride: params.stride, } .map(&col.slice, &device, &col_l)? } else { ConvTranspose1D(params).map(&self.slice, l, &kernel.slice, kernel_l, &device)? }; Ok(Self { slice, device }) } #[cfg(not(feature = "cudnn"))] fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { const USE_IM2COL_CONV2D: bool = true; let device = self.device().clone(); if !USE_IM2COL_CONV2D { let slice = Conv2D(params).map(&self.slice, l, &kernel.slice, kernel_l, &device)?; return Ok(Self { slice, device }); } let col = Im2Col { h_k: params.k_h, w_k: params.k_w, stride: params.stride, dilation: params.dilation, padding: params.padding, } .map(&self.slice, &device, l)?; let col = Self { slice: col, device }; let h_out = params.out_h(); let w_out = params.out_w(); let b = params.b_size; let n = params.c_out; let k = params.k_h * params.k_w * params.c_in; let m = h_out * w_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = unsafe { self.device() .alloc_uninit(kernel_l.shape(), kernel.dtype())? }; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? }; let res_l = Layout::contiguous((b, h_out, w_out, n)) .transpose(1, 2)? .transpose(1, 3)?; let mut res_t = unsafe { self.device().alloc_uninit(res_l.shape(), res.dtype())? 
}; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } #[cfg(feature = "cudnn")] fn conv2d( &self, inp_l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { let device = self.device().clone(); if !kernel_l.is_contiguous() { let slice = Conv2D(params).map(&self.slice, inp_l, &kernel.slice, kernel_l, &device)?; return Ok(Self { slice, device }); } let (out_w, out_h) = (params.out_w(), params.out_h()); let dst_el = params.c_out * out_w * out_h * params.b_size; let slice = match (&self.slice, &kernel.slice) { (S::U8(inp), S::U8(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<u8>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<u8>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::U8(out) } (S::BF16(inp), S::BF16(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<bf16>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<bf16>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::BF16(out) } (S::F16(inp), S::F16(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<f16>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<f16>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::F16(out) } (S::F32(inp), S::F32(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<f32>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<f32>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::F32(out) } (S::F64(inp), S::F64(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<f64>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<f64>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::F64(out) } (S::U32(_), S::U32(_)) => Err(CudaError::InternalError("conv2d does not support u32"))?, (S::I64(_), S::I64(_)) => Err(CudaError::InternalError("conv2d does not support i64"))?, _ => Err(CudaError::InternalError("dtype mismatch in conv2d"))?, }; Ok(Self { slice, device }) } fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { let device = self.device().clone(); let slice = ConvTranspose2D(params).map(&self.slice, l, &kernel.slice, kernel_l, &device)?; Ok(Self { slice, device }) } fn avg_pool2d(&self, l: &Layout, k: (usize, usize), stride: (usize, usize)) -> Result<Self> { let device = self.device().clone(); let slice = Pool2D { w_k: k.0, h_k: k.1, w_stride: stride.0, h_stride: stride.1, op: PoolOp::Avg, } .map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn max_pool2d(&self, l: &Layout, k: (usize, usize), stride: (usize, usize)) -> Result<Self> { let device = self.device().clone(); let slice = Pool2D { w_k: k.0, h_k: k.1, w_stride: stride.0, h_stride: stride.1, op: PoolOp::Max, } .map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn upsample_nearest1d(&self, _: &Layout, _out_sz: usize) -> Result<Self> { crate::bail!("upsample-nearest1d is not supported on cuda") } fn upsample_nearest2d(&self, l: &Layout, out_w: usize, out_h: usize) -> Result<Self> { let device = self.device().clone(); let slice = UpsampleNearest2D(out_w, out_h).map(&self.slice, 
&device, l)?; Ok(Self { slice, device }) } fn index_select(&self, ids: &Self, l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> { let device = self.device().clone(); let slice = IndexSelect(ids, ids_l, dim).map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn gather(&self, l: &Layout, ids: &Self, ids_l: &Layout, dim: usize) -> Result<Self> { let device = self.device().clone(); let slice = Gather(ids, ids_l, dim).map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn scatter_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { let device = self.device().clone(); let mut acc = unsafe { device.alloc_uninit(l.shape(), self.dtype())? }; self.copy_strided_src(&mut acc, 0, l)?; ScatterAdd(ids, ids_l, dim).map(&mut acc.slice, l.shape(), &src.slice, src_l, &device)?; Ok(acc) } fn index_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { let device = self.device().clone(); let mut acc = unsafe { device.alloc_uninit(l.shape(), self.dtype())? }; self.copy_strided_src(&mut acc, 0, l)?; IndexAdd(ids, ids_l, dim).map(&mut acc.slice, l.shape(), &src.slice, src_l, &device)?; Ok(acc) } fn matmul( &self, rhs: &Self, (b, m, n, k): (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { let elem_count = b * m * n; let dev = &self.device; let slice = match (&self.slice, &rhs.slice) { (CudaStorageSlice::BF16(lhs), CudaStorageSlice::BF16(rhs)) => { let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let cfg = gemm_config(bf16::ONE, bf16::ZERO, (b, m, n, k), lhs_l, rhs_l)?; let mut out = unsafe { dev.alloc::<bf16>(elem_count) }.w()?; unsafe { gemm_strided_batched_bf16(&self.device.blas, cfg, rhs, lhs, &mut out) } .w()?; CudaStorageSlice::BF16(out) } (CudaStorageSlice::F16(lhs), CudaStorageSlice::F16(rhs)) => { let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let cfg = gemm_config(f16::ONE, f16::ZERO, (b, m, n, k), lhs_l, rhs_l)?; let mut out = unsafe { dev.alloc::<f16>(elem_count) }.w()?; unsafe { gemm_strided_batched_f16(&self.device.blas, cfg, rhs, lhs, &mut out) } .w()?; CudaStorageSlice::F16(out) } (CudaStorageSlice::F32(lhs), CudaStorageSlice::F32(rhs)) => { let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let cfg = gemm_config(1., 0., (b, m, n, k), lhs_l, rhs_l)?; let mut out = unsafe { dev.alloc::<f32>(elem_count) }.w()?; unsafe { gemm_strided_batched_f32(&self.device.blas, cfg, rhs, lhs, &mut out) } .w()?; CudaStorageSlice::F32(out) } (CudaStorageSlice::F64(lhs), CudaStorageSlice::F64(rhs)) => { let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let cfg = gemm_config(1., 0., (b, m, n, k), lhs_l, rhs_l)?; let mut out = unsafe { dev.alloc::<f64>(elem_count) }.w()?; unsafe { self.device .blas .gemm_strided_batched(cfg, rhs, lhs, &mut out) } .w()?; CudaStorageSlice::F64(out) } _ => Err(CudaError::InternalError("dtype mismatch in matmul op"))?, }; let device = dev.clone(); Ok(Self { slice, device }) } fn copy2d( &self, dst: &mut Self, d1: usize, d2: usize, src_s: usize, dst_s: usize, src_o: usize, dst_o: usize, ) -> Result<()> { let dev = &self.device; let d1 = d1 as u32; let d2 = d2 as u32; // Nothing to copy so we exit early to avoid launching a kernel and some potential invalid // argument with a null pointer. 
if d1 == 0 || d2 == 0 { return Ok(()); } let dst_s = dst_s as u32; let src_s = src_s as u32; let (src, dst, kname) = match (&self.slice, &mut dst.slice) { (S::U8(s), S::U8(d)) => ( *s.slice(src_o..).device_ptr(), *d.slice(dst_o..).device_ptr(), "copy2d_u8", ), (S::U32(s), S::U32(d)) => ( *s.slice(src_o..).device_ptr(), *d.slice(dst_o..).device_ptr(), "copy2d_u32", ), (S::I64(s), S::I64(d)) => ( *s.slice(src_o..).device_ptr(), *d.slice(dst_o..).device_ptr(), "copy2d_i64", ), (S::BF16(s), S::BF16(d)) => ( *s.slice(src_o..).device_ptr(), *d.slice(dst_o..).device_ptr(), "copy2d_bf16", ), (S::F16(s), S::F16(d)) => ( *s.slice(src_o..).device_ptr(), *d.slice(dst_o..).device_ptr(), "copy2d_f16", ), (S::F32(s), S::F32(d)) => ( *s.slice(src_o..).device_ptr(), *d.slice(dst_o..).device_ptr(), "copy2d_f32", ), (S::F64(s), S::F64(d)) => ( *s.slice(src_o..).device_ptr(), *d.slice(dst_o..).device_ptr(), "copy2d_f64", ), _ => Err(CudaError::InternalError("dtype mismatch in copy2d"))?, }; let func = dev.get_or_load_func(kname, kernels::FILL)?; let cfg = LaunchConfig::for_num_elems(d1 * d2); let params = (src, dst, d1, d2, src_s, dst_s); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(()) } fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> { let src_shape = src_l.shape(); let dims = src_shape.dims(); let el_count = src_shape.elem_count(); if el_count == 0 { return Ok(()); } let cfg = LaunchConfig::for_num_elems(el_count as u32); let dev = &self.device; let ds = SlicePtrOrNull::params_from_layout(dev, src_l)?; match (&self.slice, &mut dst.slice) { (CudaStorageSlice::BF16(src), CudaStorageSlice::BF16(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_bf16", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::F16(src), CudaStorageSlice::F16(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_f16", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::F32(src), CudaStorageSlice::F32(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_f32", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::U8(src), CudaStorageSlice::U8(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_u8", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::U32(src), CudaStorageSlice::U32(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? 
} else { let func = dev.get_or_load_func("ucopy_u32", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::I64(src), CudaStorageSlice::I64(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_i64", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::F64(src), CudaStorageSlice::F64(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_f64", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; } } _ => Err(CudaError::InternalError( "dtype mismatch in copy_strided op", ))?, } Ok(()) } } // Default for the reduced precision setting is false, similar to pytorch. // https://github.com/pytorch/pytorch/issues/123157 static MM_F16_REDUCED_PRECISION: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false); static MM_BF16_REDUCED_PRECISION: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false); static MM_F32_REDUCED_PRECISION: std::sync::atomic::AtomicBool = std::sync::atomic::AtomicBool::new(false); /// This bool controls whether reduced precision reductions (e.g., with tf32 accumulation type) are /// allowed with f32 GEMMs. pub fn gemm_reduced_precision_f32() -> bool { MM_F32_REDUCED_PRECISION.load(std::sync::atomic::Ordering::Relaxed) } /// This bool controls whether reduced precision reductions (e.g., with tf32 accumulation type) are /// allowed with f32 GEMMs. pub fn set_gemm_reduced_precision_f32(b: bool) { MM_F32_REDUCED_PRECISION.store(b, std::sync::atomic::Ordering::Relaxed) } /// This bool controls whether reduced precision reductions (e.g., with fp16 accumulation type) are /// allowed with f16 GEMMs. pub fn gemm_reduced_precision_f16() -> bool { MM_F16_REDUCED_PRECISION.load(std::sync::atomic::Ordering::Relaxed) } /// This bool controls whether reduced precision reductions (e.g., with fp16 accumulation type) are /// allowed with f16 GEMMs. pub fn set_gemm_reduced_precision_f16(b: bool) { MM_F16_REDUCED_PRECISION.store(b, std::sync::atomic::Ordering::Relaxed) } /// This bool controls whether reduced precision reductions (e.g., with fp16 accumulation type) are /// allowed with bf16 GEMMs. pub fn gemm_reduced_precision_bf16() -> bool { MM_BF16_REDUCED_PRECISION.load(std::sync::atomic::Ordering::Relaxed) } /// This bool controls whether reduced precision reductions (e.g., with fp16 accumulation type) are /// allowed with bf16 GEMMs. 
pub fn set_gemm_reduced_precision_bf16(b: bool) { MM_BF16_REDUCED_PRECISION.store(b, std::sync::atomic::Ordering::Relaxed) } unsafe fn gemm_strided_batched_f32( cublas: &cudarc::cublas::CudaBlas, cfg: StridedBatchedConfig<f32>, a: &cudarc::driver::CudaView<f32>, b: &cudarc::driver::CudaView<f32>, c: &mut CudaSlice<f32>, ) -> std::result::Result<(), cudarc::cublas::result::CublasError> { use cudarc::cublas::sys; use cudarc::driver::DevicePtrMut; let compute_type = if gemm_reduced_precision_f32() { sys::cublasComputeType_t::CUBLAS_COMPUTE_32F_FAST_TF32 } else { sys::cublasComputeType_t::CUBLAS_COMPUTE_32F }; let alpha = &cfg.gemm.alpha as *const f32 as *const _; let beta = &cfg.gemm.beta as *const f32 as *const _; cudarc::cublas::result::gemm_strided_batched_ex( *cublas.handle(), cfg.gemm.transa, cfg.gemm.transb, cfg.gemm.m, cfg.gemm.n, cfg.gemm.k, alpha, *a.device_ptr() as *const _, sys::cudaDataType_t::CUDA_R_32F, cfg.gemm.lda, cfg.stride_a, *b.device_ptr() as *const _, sys::cudaDataType_t::CUDA_R_32F, cfg.gemm.ldb, cfg.stride_b, beta, *c.device_ptr_mut() as *mut _, sys::cudaDataType_t::CUDA_R_32F, cfg.gemm.ldc, cfg.stride_c, cfg.batch_size, compute_type, sys::cublasGemmAlgo_t::CUBLAS_GEMM_DEFAULT_TENSOR_OP, ) } unsafe fn gemm_strided_batched_f16( cublas: &cudarc::cublas::CudaBlas, cfg: StridedBatchedConfig<f16>, a: &cudarc::driver::CudaView<f16>, b: &cudarc::driver::CudaView<f16>, c: &mut CudaSlice<f16>, ) -> std::result::Result<(), cudarc::cublas::result::CublasError> { use cudarc::cublas::sys; use cudarc::driver::DevicePtrMut; let alpha = cfg.gemm.alpha; let beta = cfg.gemm.beta; let alpha_f32: f32 = cfg.gemm.alpha.to_f32(); let beta_f32: f32 = cfg.gemm.beta.to_f32(); let (compute_type, alpha, beta) = if gemm_reduced_precision_f16() { ( sys::cublasComputeType_t::CUBLAS_COMPUTE_16F, (&alpha) as *const f16 as *const _, (&beta) as *const f16 as *const _, ) } else { ( sys::cublasComputeType_t::CUBLAS_COMPUTE_32F, (&alpha_f32) as *const f32 as *const _, (&beta_f32) as *const f32 as *const _, ) }; cudarc::cublas::result::gemm_strided_batched_ex( *cublas.handle(), cfg.gemm.transa, cfg.gemm.transb, cfg.gemm.m, cfg.gemm.n, cfg.gemm.k, alpha, *a.device_ptr() as *const _, sys::cudaDataType_t::CUDA_R_16F, cfg.gemm.lda, cfg.stride_a, *b.device_ptr() as *const _, sys::cudaDataType_t::CUDA_R_16F, cfg.gemm.ldb, cfg.stride_b, beta, *c.device_ptr_mut() as *mut _, sys::cudaDataType_t::CUDA_R_16F, cfg.gemm.ldc, cfg.stride_c, cfg.batch_size, compute_type, sys::cublasGemmAlgo_t::CUBLAS_GEMM_DEFAULT_TENSOR_OP, ) } unsafe fn gemm_strided_batched_bf16( cublas: &cudarc::cublas::CudaBlas, cfg: StridedBatchedConfig<bf16>, a: &cudarc::driver::CudaView<bf16>, b: &cudarc::driver::CudaView<bf16>, c: &mut CudaSlice<bf16>, ) -> std::result::Result<(), cudarc::cublas::result::CublasError> { use cudarc::cublas::sys; use cudarc::driver::DevicePtrMut; let alpha_f32: f32 = cfg.gemm.alpha.to_f32(); let beta_f32: f32 = cfg.gemm.beta.to_f32(); // The type for alpha and beta depends on the computeType. 
// https://docs.nvidia.com/cuda/cublas/index.html#cublasgemmstridedbatchedex let (compute_type, alpha, beta) = if gemm_reduced_precision_bf16() { ( sys::cublasComputeType_t::CUBLAS_COMPUTE_32F_FAST_16BF, (&alpha_f32) as *const f32 as *const _, (&beta_f32) as *const f32 as *const _, ) } else { ( sys::cublasComputeType_t::CUBLAS_COMPUTE_32F, (&alpha_f32) as *const f32 as *const _, (&beta_f32) as *const f32 as *const _, ) }; cudarc::cublas::result::gemm_strided_batched_ex( *cublas.handle(), cfg.gemm.transa, cfg.gemm.transb, cfg.gemm.m, cfg.gemm.n, cfg.gemm.k, alpha, *a.device_ptr() as *const _, sys::cudaDataType_t::CUDA_R_16BF, cfg.gemm.lda, cfg.stride_a, *b.device_ptr() as *const _, sys::cudaDataType_t::CUDA_R_16BF, cfg.gemm.ldb, cfg.stride_b, beta, *c.device_ptr_mut() as *mut _, sys::cudaDataType_t::CUDA_R_16BF, cfg.gemm.ldc, cfg.stride_c, cfg.batch_size, compute_type, sys::cublasGemmAlgo_t::CUBLAS_GEMM_DEFAULT_TENSOR_OP, ) }
candle/candle-core/src/cuda_backend/mod.rs/0
{ "file_path": "candle/candle-core/src/cuda_backend/mod.rs", "repo_id": "candle", "token_count": 41770 }
17
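The reduced-precision GEMM toggles at the end of this file are plain atomics, so opting into TF32 accumulation is a single call made before any matmuls run. Below is a minimal usage sketch; the re-export path `candle_core::cuda` for these functions is an assumption (it may differ between candle versions), and a CUDA-enabled build is required.

```rust
// Sketch: enabling TF32 accumulation for f32 GEMMs. Assumes the toggle is
// re-exported as `candle_core::cuda::set_gemm_reduced_precision_f32`.
use candle_core::{Device, Result, Tensor};

fn main() -> Result<()> {
    // The default is false (full f32 accumulation), matching PyTorch.
    candle_core::cuda::set_gemm_reduced_precision_f32(true);

    let dev = Device::new_cuda(0)?;
    let a = Tensor::randn(0f32, 1f32, (128, 256), &dev)?;
    let b = Tensor::randn(0f32, 1f32, (256, 64), &dev)?;
    // This matmul now goes through gemm_strided_batched_ex with
    // CUBLAS_COMPUTE_32F_FAST_TF32 instead of CUBLAS_COMPUTE_32F.
    let c = a.matmul(&b)?;
    println!("{:?}", c.shape());
    Ok(())
}
```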
#![allow(clippy::redundant_closure_call)] use crate::Tensor; use half::{bf16, f16}; use num_traits::float::Float; #[derive(Clone, Copy, PartialEq, Eq)] pub enum CmpOp { Eq, Ne, Le, Ge, Lt, Gt, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ReduceOp { Sum, Min, Max, ArgMin, ArgMax, } impl ReduceOp { pub(crate) fn name(&self) -> &'static str { match self { Self::ArgMax => "argmax", Self::ArgMin => "argmin", Self::Min => "min", Self::Max => "max", Self::Sum => "sum", } } } // These ops return the same type as their input type. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BinaryOp { Add, Mul, Sub, Div, Maximum, Minimum, } // Unary ops with no argument #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UnaryOp { Exp, Log, Sin, Cos, Abs, Neg, Recip, Sqr, Sqrt, Gelu, GeluErf, Erf, Relu, Silu, Tanh, Floor, Ceil, Round, Sign, } #[derive(Clone)] pub enum Op { Binary(Tensor, Tensor, BinaryOp), Unary(Tensor, UnaryOp), Cmp(Tensor, CmpOp), // The third argument is the reduced shape with `keepdim=true`. Reduce(Tensor, ReduceOp, Vec<usize>), Matmul(Tensor, Tensor), Gather(Tensor, Tensor, usize), ScatterAdd(Tensor, Tensor, Tensor, usize), IndexSelect(Tensor, Tensor, usize), IndexAdd(Tensor, Tensor, Tensor, usize), WhereCond(Tensor, Tensor, Tensor), #[allow(dead_code)] Conv1D { arg: Tensor, kernel: Tensor, padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] ConvTranspose1D { arg: Tensor, kernel: Tensor, padding: usize, output_padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] Conv2D { arg: Tensor, kernel: Tensor, padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] ConvTranspose2D { arg: Tensor, kernel: Tensor, padding: usize, output_padding: usize, stride: usize, dilation: usize, }, AvgPool2D { arg: Tensor, kernel_size: (usize, usize), stride: (usize, usize), }, MaxPool2D { arg: Tensor, kernel_size: (usize, usize), stride: (usize, usize), }, UpsampleNearest1D { arg: Tensor, target_size: usize, }, UpsampleNearest2D { arg: Tensor, target_h: usize, target_w: usize, }, Cat(Vec<Tensor>, usize), #[allow(dead_code)] // add is currently unused. Affine { arg: Tensor, mul: f64, add: f64, }, ToDType(Tensor), Copy(Tensor), Broadcast(Tensor), Narrow(Tensor, usize, usize, usize), SliceScatter0(Tensor, Tensor, usize), Reshape(Tensor), ToDevice(Tensor), Transpose(Tensor, usize, usize), Permute(Tensor, Vec<usize>), Elu(Tensor, f64), Powf(Tensor, f64), CustomOp1( Tensor, std::sync::Arc<Box<dyn crate::CustomOp1 + Send + Sync>>, ), CustomOp2( Tensor, Tensor, std::sync::Arc<Box<dyn crate::CustomOp2 + Send + Sync>>, ), CustomOp3( Tensor, Tensor, Tensor, std::sync::Arc<Box<dyn crate::CustomOp3 + Send + Sync>>, ), } pub trait UnaryOpT { const NAME: &'static str; const KERNEL: &'static str; const V: Self; fn bf16(v1: bf16) -> bf16; fn f16(v1: f16) -> f16; fn f32(v1: f32) -> f32; fn f64(v1: f64) -> f64; fn u8(v1: u8) -> u8; fn u32(v1: u32) -> u32; fn i64(v1: i64) -> i64; // There is no very good way to represent optional function in traits so we go for an explicit // boolean flag to mark the function as existing. 
const BF16_VEC: bool = false; fn bf16_vec(_xs: &[bf16], _ys: &mut [bf16]) {} const F16_VEC: bool = false; fn f16_vec(_xs: &[f16], _ys: &mut [f16]) {} const F32_VEC: bool = false; fn f32_vec(_xs: &[f32], _ys: &mut [f32]) {} const F64_VEC: bool = false; fn f64_vec(_xs: &[f64], _ys: &mut [f64]) {} } pub trait BinaryOpT { const NAME: &'static str; const KERNEL: &'static str; const V: Self; fn bf16(v1: bf16, v2: bf16) -> bf16; fn f16(v1: f16, v2: f16) -> f16; fn f32(v1: f32, v2: f32) -> f32; fn f64(v1: f64, v2: f64) -> f64; fn u8(v1: u8, v2: u8) -> u8; fn u32(v1: u32, v2: u32) -> u32; fn i64(v1: i64, v2: i64) -> i64; const BF16_VEC: bool = false; fn bf16_vec(_xs1: &[bf16], _xs2: &[bf16], _ys: &mut [bf16]) {} const F16_VEC: bool = false; fn f16_vec(_xs1: &[f16], _xs2: &[f16], _ys: &mut [f16]) {} const F32_VEC: bool = false; fn f32_vec(_xs1: &[f32], _xs2: &[f32], _ys: &mut [f32]) {} const F64_VEC: bool = false; fn f64_vec(_xs1: &[f64], _xs2: &[f64], _ys: &mut [f64]) {} const U8_VEC: bool = false; fn u8_vec(_xs1: &[u8], _xs2: &[u8], _ys: &mut [u8]) {} const U32_VEC: bool = false; fn u32_vec(_xs1: &[u32], _xs2: &[u32], _ys: &mut [u32]) {} const I64_VEC: bool = false; fn i64_vec(_xs1: &[i64], _xs2: &[i64], _ys: &mut [i64]) {} } pub(crate) struct Add; pub(crate) struct Div; pub(crate) struct Mul; pub(crate) struct Sub; pub(crate) struct Maximum; pub(crate) struct Minimum; pub(crate) struct Exp; pub(crate) struct Log; pub(crate) struct Sin; pub(crate) struct Cos; pub(crate) struct Abs; pub(crate) struct Neg; pub(crate) struct Recip; pub(crate) struct Sqr; pub(crate) struct Sqrt; pub(crate) struct Gelu; pub(crate) struct GeluErf; pub(crate) struct Erf; pub(crate) struct Relu; pub(crate) struct Silu; pub(crate) struct Tanh; pub(crate) struct Floor; pub(crate) struct Ceil; pub(crate) struct Round; pub(crate) struct Sign; macro_rules! 
bin_op { ($op:ident, $name: literal, $e: expr, $f32_vec: ident, $f64_vec: ident) => { impl BinaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("b", $name); const V: Self = $op; #[inline(always)] fn bf16(v1: bf16, v2: bf16) -> bf16 { $e(v1, v2) } #[inline(always)] fn f16(v1: f16, v2: f16) -> f16 { $e(v1, v2) } #[inline(always)] fn f32(v1: f32, v2: f32) -> f32 { $e(v1, v2) } #[inline(always)] fn f64(v1: f64, v2: f64) -> f64 { $e(v1, v2) } #[inline(always)] fn u8(v1: u8, v2: u8) -> u8 { $e(v1, v2) } #[inline(always)] fn u32(v1: u32, v2: u32) -> u32 { $e(v1, v2) } #[inline(always)] fn i64(v1: i64, v2: i64) -> i64 { $e(v1, v2) } #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs1: &[f32], xs2: &[f32], ys: &mut [f32]) { crate::mkl::$f32_vec(xs1, xs2, ys) } #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs1: &[f64], xs2: &[f64], ys: &mut [f64]) { crate::mkl::$f64_vec(xs1, xs2, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs1: &[f32], xs2: &[f32], ys: &mut [f32]) { crate::accelerate::$f32_vec(xs1, xs2, ys) } #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs1: &[f64], xs2: &[f64], ys: &mut [f64]) { crate::accelerate::$f64_vec(xs1, xs2, ys) } } }; } bin_op!(Add, "add", |v1, v2| v1 + v2, vs_add, vd_add); bin_op!(Sub, "sub", |v1, v2| v1 - v2, vs_sub, vd_sub); bin_op!(Mul, "mul", |v1, v2| v1 * v2, vs_mul, vd_mul); bin_op!(Div, "div", |v1, v2| v1 / v2, vs_div, vd_div); bin_op!( Minimum, "minimum", |v1, v2| if v1 > v2 { v2 } else { v1 }, vs_min, vd_min ); bin_op!( Maximum, "maximum", |v1, v2| if v1 < v2 { v2 } else { v1 }, vs_max, vd_max ); #[allow(clippy::redundant_closure_call)] macro_rules! 
unary_op { ($op: ident, $name: literal, $a: ident, $e: expr) => { impl UnaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("u", $name); const V: Self = $op; #[inline(always)] fn bf16($a: bf16) -> bf16 { $e } #[inline(always)] fn f16($a: f16) -> f16 { $e } #[inline(always)] fn f32($a: f32) -> f32 { $e } #[inline(always)] fn f64($a: f64) -> f64 { $e } #[inline(always)] fn u8(_: u8) -> u8 { todo!("no unary function for u8") } #[inline(always)] fn u32(_: u32) -> u32 { todo!("no unary function for u32") } #[inline(always)] fn i64(_: i64) -> i64 { todo!("no unary function for i64") } } }; ($op: ident, $name: literal, $a: ident, $e: expr, $f32_vec:ident, $f64_vec:ident) => { impl UnaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("u", $name); const V: Self = $op; #[inline(always)] fn bf16($a: bf16) -> bf16 { $e } #[inline(always)] fn f16($a: f16) -> f16 { $e } #[inline(always)] fn f32($a: f32) -> f32 { $e } #[inline(always)] fn f64($a: f64) -> f64 { $e } #[inline(always)] fn u8(_: u8) -> u8 { todo!("no unary function for u8") } #[inline(always)] fn u32(_: u32) -> u32 { todo!("no unary function for u32") } #[inline(always)] fn i64(_: i64) -> i64 { todo!("no unary function for i64") } #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::mkl::$f32_vec(xs, ys) } #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::mkl::$f64_vec(xs, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::accelerate::$f32_vec(xs, ys) } #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::accelerate::$f64_vec(xs, ys) } } }; } unary_op!(Exp, "exp", v, v.exp(), vs_exp, vd_exp); unary_op!(Log, "log", v, v.ln(), vs_ln, vd_ln); unary_op!(Sin, "sin", v, v.sin(), vs_sin, vd_sin); unary_op!(Cos, "cos", v, v.cos(), vs_cos, vd_cos); unary_op!(Tanh, "tanh", v, v.tanh(), vs_tanh, vd_tanh); unary_op!(Neg, "neg", v, -v); unary_op!(Recip, "recip", v, v.recip()); unary_op!(Sqr, "sqr", v, v * v, vs_sqr, vd_sqr); unary_op!(Sqrt, "sqrt", v, v.sqrt(), vs_sqrt, vd_sqrt); // Hardcode the value for sqrt(2/pi) // https://github.com/huggingface/candle/issues/1982 #[allow(clippy::excessive_precision)] const SQRT_TWO_OVER_PI_F32: f32 = 0.79788456080286535587989211986876373; #[allow(clippy::excessive_precision)] const SQRT_TWO_OVER_PI_F64: f64 = 0.79788456080286535587989211986876373; /// Tanh based approximation of the `gelu` operation /// GeluErf is the more precise one. 
/// <https://en.wikipedia.org/wiki/Activation_function#Comparison_of_activation_functions> impl UnaryOpT for Gelu { const NAME: &'static str = "gelu"; const V: Self = Gelu; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f32_const(0.5) * v * (bf16::ONE + bf16::tanh( bf16::from_f32_const(SQRT_TWO_OVER_PI_F32) * v * (bf16::ONE + bf16::from_f32_const(0.044715) * v * v), )) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f32_const(0.5) * v * (f16::ONE + f16::tanh( f16::from_f32_const(SQRT_TWO_OVER_PI_F32) * v * (f16::ONE + f16::from_f32_const(0.044715) * v * v), )) } #[inline(always)] fn f32(v: f32) -> f32 { 0.5 * v * (1.0 + f32::tanh(SQRT_TWO_OVER_PI_F32 * v * (1.0 + 0.044715 * v * v))) } #[inline(always)] fn f64(v: f64) -> f64 { 0.5 * v * (1.0 + f64::tanh(SQRT_TWO_OVER_PI_F64 * v * (1.0 + 0.044715 * v * v))) } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } const KERNEL: &'static str = "ugelu"; #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::mkl::vs_gelu(xs, ys) } #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::mkl::vd_gelu(xs, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::accelerate::vs_gelu(xs, ys) } #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::accelerate::vd_gelu(xs, ys) } } /// `erf` operation /// <https://en.wikipedia.org/wiki/Error_function> impl UnaryOpT for Erf { const NAME: &'static str = "erf"; const KERNEL: &'static str = "uerf"; const V: Self = Erf; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f32(v: f32) -> f32 { Self::f64(v as f64) as f32 } #[inline(always)] fn f64(v: f64) -> f64 { crate::cpu::erf::erf(v) } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } } /// Silu operation impl UnaryOpT for Silu { const NAME: &'static str = "silu"; const V: Self = Silu; #[inline(always)] fn bf16(v: bf16) -> bf16 { v / (bf16::ONE + (-v).exp()) } #[inline(always)] fn f16(v: f16) -> f16 { v / (f16::ONE + (-v).exp()) } #[inline(always)] fn f32(v: f32) -> f32 { v / (1.0 + (-v).exp()) } #[inline(always)] fn f64(v: f64) -> f64 { v / (1.0 + (-v).exp()) } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } const KERNEL: &'static str = "usilu"; #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::mkl::vs_silu(xs, ys) } #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::mkl::vd_silu(xs, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::accelerate::vs_silu(xs, ys) } #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut 
[f64]) { crate::accelerate::vd_silu(xs, ys) } } impl UnaryOpT for Abs { const NAME: &'static str = "abs"; const KERNEL: &'static str = "uabs"; const V: Self = Abs; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.abs() } #[inline(always)] fn f16(v: f16) -> f16 { v.abs() } #[inline(always)] fn f32(v: f32) -> f32 { v.abs() } #[inline(always)] fn f64(v: f64) -> f64 { v.abs() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v.abs() } } impl UnaryOpT for Ceil { const NAME: &'static str = "ceil"; const KERNEL: &'static str = "uceil"; const V: Self = Ceil; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.ceil() } #[inline(always)] fn f16(v: f16) -> f16 { v.ceil() } #[inline(always)] fn f32(v: f32) -> f32 { v.ceil() } #[inline(always)] fn f64(v: f64) -> f64 { v.ceil() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for Floor { const NAME: &'static str = "floor"; const KERNEL: &'static str = "ufloor"; const V: Self = Floor; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.floor() } #[inline(always)] fn f16(v: f16) -> f16 { v.floor() } #[inline(always)] fn f32(v: f32) -> f32 { v.floor() } #[inline(always)] fn f64(v: f64) -> f64 { v.floor() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for Round { const NAME: &'static str = "round"; const KERNEL: &'static str = "uround"; const V: Self = Round; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.round() } #[inline(always)] fn f16(v: f16) -> f16 { v.round() } #[inline(always)] fn f32(v: f32) -> f32 { v.round() } #[inline(always)] fn f64(v: f64) -> f64 { v.round() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for GeluErf { const NAME: &'static str = "gelu_erf"; const KERNEL: &'static str = "ugelu_erf"; const V: Self = GeluErf; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f32(v: f32) -> f32 { Self::f64(v as f64) as f32 } #[inline(always)] fn f64(v: f64) -> f64 { (crate::cpu::erf::erf(v / 2f64.sqrt()) + 1.) * 0.5 * v } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } } impl UnaryOpT for Relu { const NAME: &'static str = "relu"; const KERNEL: &'static str = "urelu"; const V: Self = Relu; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.max(bf16::ZERO) } #[inline(always)] fn f16(v: f16) -> f16 { v.max(f16::ZERO) } #[inline(always)] fn f32(v: f32) -> f32 { v.max(0f32) } #[inline(always)] fn f64(v: f64) -> f64 { v.max(0f64) } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } /// `BackpropOp` is a wrapper around `Option<Op>`. 
The main goal is to ensure that dependencies are /// properly checked when creating a new value #[derive(Clone)] pub struct BackpropOp(Option<Op>); impl BackpropOp { pub(crate) fn none() -> Self { BackpropOp(None) } pub(crate) fn new1(arg: &Tensor, f: impl Fn(Tensor) -> Op) -> Self { let op = if arg.track_op() { Some(f(arg.clone())) } else { None }; Self(op) } pub(crate) fn new2(arg1: &Tensor, arg2: &Tensor, f: impl Fn(Tensor, Tensor) -> Op) -> Self { let op = if arg1.track_op() || arg2.track_op() { Some(f(arg1.clone(), arg2.clone())) } else { None }; Self(op) } pub(crate) fn new3( arg1: &Tensor, arg2: &Tensor, arg3: &Tensor, f: impl Fn(Tensor, Tensor, Tensor) -> Op, ) -> Self { let op = if arg1.track_op() || arg2.track_op() || arg3.track_op() { Some(f(arg1.clone(), arg2.clone(), arg3.clone())) } else { None }; Self(op) } pub(crate) fn new<A: AsRef<Tensor>>(args: &[A], f: impl Fn(Vec<Tensor>) -> Op) -> Self { let op = if args.iter().any(|arg| arg.as_ref().track_op()) { let args: Vec<Tensor> = args.iter().map(|arg| arg.as_ref().clone()).collect(); Some(f(args)) } else { None }; Self(op) } pub(crate) fn is_none(&self) -> bool { self.0.is_none() } } impl std::ops::Deref for BackpropOp { type Target = Option<Op>; fn deref(&self) -> &Self::Target { &self.0 } } impl UnaryOpT for Sign { const NAME: &'static str = "sign"; const KERNEL: &'static str = "usign"; const V: Self = Sign; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from((v > bf16::ZERO) as i8) - bf16::from((v < bf16::ZERO) as i8) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from((v > f16::ZERO) as i8) - f16::from((v < f16::ZERO) as i8) } #[inline(always)] fn f32(v: f32) -> f32 { f32::from(v > 0.) - f32::from(v < 0.) } #[inline(always)] fn f64(v: f64) -> f64 { f64::from(v > 0.) - f64::from(v < 0.) } #[inline(always)] fn u8(v: u8) -> u8 { u8::min(1, v) } #[inline(always)] fn u32(v: u32) -> u32 { u32::min(1, v) } #[inline(always)] fn i64(v: i64) -> i64 { (v > 0) as i64 - (v < 0) as i64 } }
candle/candle-core/src/op.rs/0
{ "file_path": "candle/candle-core/src/op.rs", "repo_id": "candle", "token_count": 13498 }
18
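For reference, adding a new scalar unary op mostly amounts to filling in the `UnaryOpT` trait. Here is a hedged sketch of what a hypothetical `Sigmoid` op could look like if it were added to `op.rs` next to the impls above (so `bf16`, `f16` and `num_traits::float::Float` are already in scope); the matching `"usigmoid"` GPU kernel name is an assumption and such a kernel would have to be written separately.

```rust
// Hypothetical `Sigmoid` unary op, following the Silu/Relu pattern above.
pub(crate) struct Sigmoid;

impl UnaryOpT for Sigmoid {
    const NAME: &'static str = "sigmoid";
    const KERNEL: &'static str = "usigmoid"; // assumed kernel, not in the repo
    const V: Self = Sigmoid;
    #[inline(always)]
    fn bf16(v: bf16) -> bf16 {
        bf16::ONE / (bf16::ONE + (-v).exp())
    }
    #[inline(always)]
    fn f16(v: f16) -> f16 {
        f16::ONE / (f16::ONE + (-v).exp())
    }
    #[inline(always)]
    fn f32(v: f32) -> f32 {
        1.0 / (1.0 + (-v).exp())
    }
    #[inline(always)]
    fn f64(v: f64) -> f64 {
        1.0 / (1.0 + (-v).exp())
    }
    // Integer dtypes have no sensible sigmoid; return 0 as Gelu/Erf/Silu do.
    #[inline(always)]
    fn u8(_: u8) -> u8 {
        0
    }
    #[inline(always)]
    fn u32(_: u32) -> u32 {
        0
    }
    #[inline(always)]
    fn i64(_: i64) -> i64 {
        0
    }
}
```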
//! The shape of a tensor is a tuple with the size of each of its dimensions. #![allow(clippy::redundant_closure_call)] use crate::{Error, Result}; #[derive(Clone, PartialEq, Eq)] pub struct Shape(Vec<usize>); pub const SCALAR: Shape = Shape(vec![]); impl std::fmt::Debug for Shape { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", &self.dims()) } } impl<const C: usize> From<&[usize; C]> for Shape { fn from(dims: &[usize; C]) -> Self { Self(dims.to_vec()) } } impl From<&[usize]> for Shape { fn from(dims: &[usize]) -> Self { Self(dims.to_vec()) } } impl From<&Shape> for Shape { fn from(shape: &Shape) -> Self { Self(shape.0.to_vec()) } } impl From<()> for Shape { fn from(_: ()) -> Self { Self(vec![]) } } impl From<usize> for Shape { fn from(d1: usize) -> Self { Self(vec![d1]) } } impl From<(usize,)> for Shape { fn from(d1: (usize,)) -> Self { Self(vec![d1.0]) } } impl From<(usize, usize)> for Shape { fn from(d12: (usize, usize)) -> Self { Self(vec![d12.0, d12.1]) } } impl From<(usize, usize, usize)> for Shape { fn from(d123: (usize, usize, usize)) -> Self { Self(vec![d123.0, d123.1, d123.2]) } } impl From<(usize, usize, usize, usize)> for Shape { fn from(d1234: (usize, usize, usize, usize)) -> Self { Self(vec![d1234.0, d1234.1, d1234.2, d1234.3]) } } impl From<(usize, usize, usize, usize, usize)> for Shape { fn from(d12345: (usize, usize, usize, usize, usize)) -> Self { Self(vec![d12345.0, d12345.1, d12345.2, d12345.3, d12345.4]) } } impl From<(usize, usize, usize, usize, usize, usize)> for Shape { fn from(d123456: (usize, usize, usize, usize, usize, usize)) -> Self { Self(vec![ d123456.0, d123456.1, d123456.2, d123456.3, d123456.4, d123456.5, ]) } } impl From<Vec<usize>> for Shape { fn from(dims: Vec<usize>) -> Self { Self(dims) } } macro_rules! extract_dims { ($fn_name:ident, $cnt:tt, $dims:expr, $out_type:ty) => { pub fn $fn_name(dims: &[usize]) -> Result<$out_type> { if dims.len() != $cnt { Err(Error::UnexpectedNumberOfDims { expected: $cnt, got: dims.len(), shape: Shape::from(dims), } .bt()) } else { Ok($dims(dims)) } } impl Shape { pub fn $fn_name(&self) -> Result<$out_type> { $fn_name(self.0.as_slice()) } } impl crate::Tensor { pub fn $fn_name(&self) -> Result<$out_type> { self.shape().$fn_name() } } impl std::convert::TryInto<$out_type> for Shape { type Error = crate::Error; fn try_into(self) -> std::result::Result<$out_type, Self::Error> { self.$fn_name() } } }; } impl Shape { pub fn from_dims(dims: &[usize]) -> Self { Self(dims.to_vec()) } /// The rank is the number of dimensions, 0 for a scalar value, 1 for a vector, etc. pub fn rank(&self) -> usize { self.0.len() } pub fn into_dims(self) -> Vec<usize> { self.0 } /// The dimensions as a slice of `usize`. pub fn dims(&self) -> &[usize] { &self.0 } /// The total number of elements, this is the product of all dimension sizes. pub fn elem_count(&self) -> usize { self.0.iter().product() } /// The strides given in number of elements for a contiguous n-dimensional /// arrays using this shape. pub(crate) fn stride_contiguous(&self) -> Vec<usize> { let mut stride: Vec<_> = self .0 .iter() .rev() .scan(1, |prod, u| { let prod_pre_mult = *prod; *prod *= u; Some(prod_pre_mult) }) .collect(); stride.reverse(); stride } /// Returns true if the strides are C contiguous (aka row major). 
pub fn is_contiguous(&self, stride: &[usize]) -> bool { if self.0.len() != stride.len() { return false; } let mut acc = 1; for (&stride, &dim) in stride.iter().zip(self.0.iter()).rev() { if dim > 1 && stride != acc { return false; } acc *= dim; } true } /// Returns true if the strides are Fortran contiguous (aka column major). pub fn is_fortran_contiguous(&self, stride: &[usize]) -> bool { if self.0.len() != stride.len() { return false; } let mut acc = 1; for (&stride, &dim) in stride.iter().zip(self.0.iter()) { if dim > 1 && stride != acc { return false; } acc *= dim; } true } /// Modifies the shape by adding a list of additional dimensions at the end of the existing /// dimensions. pub fn extend(mut self, additional_dims: &[usize]) -> Self { self.0.extend(additional_dims); self } /// Check whether the two shapes are compatible for broadcast, and if it is the case return the /// broadcasted shape. This is to be used for binary pointwise ops. pub fn broadcast_shape_binary_op(&self, rhs: &Self, op: &'static str) -> Result<Shape> { let lhs = self; let lhs_dims = lhs.dims(); let rhs_dims = rhs.dims(); let lhs_ndims = lhs_dims.len(); let rhs_ndims = rhs_dims.len(); let bcast_ndims = usize::max(lhs_ndims, rhs_ndims); let mut bcast_dims = vec![0; bcast_ndims]; for (idx, bcast_value) in bcast_dims.iter_mut().enumerate() { let rev_idx = bcast_ndims - idx; let l_value = if lhs_ndims < rev_idx { 1 } else { lhs_dims[lhs_ndims - rev_idx] }; let r_value = if rhs_ndims < rev_idx { 1 } else { rhs_dims[rhs_ndims - rev_idx] }; *bcast_value = if l_value == r_value { l_value } else if l_value == 1 { r_value } else if r_value == 1 { l_value } else { Err(Error::ShapeMismatchBinaryOp { lhs: lhs.clone(), rhs: rhs.clone(), op, } .bt())? } } Ok(Shape::from(bcast_dims)) } pub(crate) fn broadcast_shape_matmul(&self, rhs: &Self) -> Result<(Shape, Shape)> { let lhs = self; let lhs_dims = lhs.dims(); let rhs_dims = rhs.dims(); if lhs_dims.len() < 2 || rhs_dims.len() < 2 { crate::bail!("only 2d matrixes are supported {lhs:?} {rhs:?}") } let (m, lhs_k) = (lhs_dims[lhs_dims.len() - 2], lhs_dims[lhs_dims.len() - 1]); let (rhs_k, n) = (rhs_dims[rhs_dims.len() - 2], rhs_dims[rhs_dims.len() - 1]); if lhs_k != rhs_k { crate::bail!("different inner dimensions in broadcast matmul {lhs:?} {rhs:?}") } let lhs_b = Self::from(&lhs_dims[..lhs_dims.len() - 2]); let rhs_b = Self::from(&rhs_dims[..rhs_dims.len() - 2]); let bcast = lhs_b.broadcast_shape_binary_op(&rhs_b, "broadcast_matmul")?; let bcast_dims = bcast.dims(); let bcast_lhs = [bcast_dims, &[m, lhs_k]].concat(); let bcast_rhs = [bcast_dims, &[rhs_k, n]].concat(); Ok((Shape::from(bcast_lhs), Shape::from(bcast_rhs))) } } pub trait Dim { fn to_index(&self, shape: &Shape, op: &'static str) -> Result<usize>; fn to_index_plus_one(&self, shape: &Shape, op: &'static str) -> Result<usize>; } impl Dim for usize { fn to_index(&self, shape: &Shape, op: &'static str) -> Result<usize> { let dim = *self; if dim >= shape.dims().len() { Err(Error::DimOutOfRange { shape: shape.clone(), dim: dim as i32, op, } .bt())? } else { Ok(dim) } } fn to_index_plus_one(&self, shape: &Shape, op: &'static str) -> Result<usize> { let dim = *self; if dim > shape.dims().len() { Err(Error::DimOutOfRange { shape: shape.clone(), dim: dim as i32, op, } .bt())? 
} else { Ok(dim) } } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum D { Minus1, Minus2, Minus(usize), } impl D { fn out_of_range(&self, shape: &Shape, op: &'static str) -> Error { let dim = match self { Self::Minus1 => -1, Self::Minus2 => -2, Self::Minus(u) => -(*u as i32), }; Error::DimOutOfRange { shape: shape.clone(), dim, op, } .bt() } } impl Dim for D { fn to_index(&self, shape: &Shape, op: &'static str) -> Result<usize> { let rank = shape.rank(); match self { Self::Minus1 if rank >= 1 => Ok(rank - 1), Self::Minus2 if rank >= 2 => Ok(rank - 2), Self::Minus(u) if *u > 0 && rank >= *u => Ok(rank - *u), _ => Err(self.out_of_range(shape, op)), } } fn to_index_plus_one(&self, shape: &Shape, op: &'static str) -> Result<usize> { let rank = shape.rank(); match self { Self::Minus1 => Ok(rank), Self::Minus2 if rank >= 1 => Ok(rank - 1), Self::Minus(u) if *u > 0 && rank + 1 >= *u => Ok(rank + 1 - *u), _ => Err(self.out_of_range(shape, op)), } } } pub trait Dims: Sized { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>>; fn to_indexes(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let dims = self.to_indexes_internal(shape, op)?; for (i, &dim) in dims.iter().enumerate() { if dims[..i].contains(&dim) { Err(Error::DuplicateDimIndex { shape: shape.clone(), dims: dims.clone(), op, } .bt())? } if dim >= shape.rank() { Err(Error::DimOutOfRange { shape: shape.clone(), dim: dim as i32, op, } .bt())? } } Ok(dims) } } impl Dims for Vec<usize> { fn to_indexes_internal(self, _: &Shape, _: &'static str) -> Result<Vec<usize>> { Ok(self) } } impl<const N: usize> Dims for [usize; N] { fn to_indexes_internal(self, _: &Shape, _: &'static str) -> Result<Vec<usize>> { Ok(self.to_vec()) } } impl Dims for &[usize] { fn to_indexes_internal(self, _: &Shape, _: &'static str) -> Result<Vec<usize>> { Ok(self.to_vec()) } } impl Dims for () { fn to_indexes_internal(self, _: &Shape, _: &'static str) -> Result<Vec<usize>> { Ok(vec![]) } } impl<D: Dim + Sized> Dims for D { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let dim = self.to_index(shape, op)?; Ok(vec![dim]) } } impl<D: Dim> Dims for (D,) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let dim = self.0.to_index(shape, op)?; Ok(vec![dim]) } } impl<D1: Dim, D2: Dim> Dims for (D1, D2) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; Ok(vec![d0, d1]) } } impl<D1: Dim, D2: Dim, D3: Dim> Dims for (D1, D2, D3) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; let d2 = self.2.to_index(shape, op)?; Ok(vec![d0, d1, d2]) } } impl<D1: Dim, D2: Dim, D3: Dim, D4: Dim> Dims for (D1, D2, D3, D4) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; let d2 = self.2.to_index(shape, op)?; let d3 = self.3.to_index(shape, op)?; Ok(vec![d0, d1, d2, d3]) } } impl<D1: Dim, D2: Dim, D3: Dim, D4: Dim, D5: Dim> Dims for (D1, D2, D3, D4, D5) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; let d2 = self.2.to_index(shape, op)?; let d3 = self.3.to_index(shape, op)?; let d4 = self.4.to_index(shape, op)?; 
Ok(vec![d0, d1, d2, d3, d4]) } } impl<D1: Dim, D2: Dim, D3: Dim, D4: Dim, D5: Dim, D6: Dim> Dims for (D1, D2, D3, D4, D5, D6) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; let d2 = self.2.to_index(shape, op)?; let d3 = self.3.to_index(shape, op)?; let d4 = self.4.to_index(shape, op)?; let d5 = self.5.to_index(shape, op)?; Ok(vec![d0, d1, d2, d3, d4, d5]) } } extract_dims!(dims0, 0, |_: &[usize]| (), ()); extract_dims!(dims1, 1, |d: &[usize]| d[0], usize); extract_dims!(dims2, 2, |d: &[usize]| (d[0], d[1]), (usize, usize)); extract_dims!( dims3, 3, |d: &[usize]| (d[0], d[1], d[2]), (usize, usize, usize) ); extract_dims!( dims4, 4, |d: &[usize]| (d[0], d[1], d[2], d[3]), (usize, usize, usize, usize) ); extract_dims!( dims5, 5, |d: &[usize]| (d[0], d[1], d[2], d[3], d[4]), (usize, usize, usize, usize, usize) ); pub trait ShapeWithOneHole { fn into_shape(self, el_count: usize) -> Result<Shape>; } impl<S: Into<Shape>> ShapeWithOneHole for S { fn into_shape(self, _el_count: usize) -> Result<Shape> { Ok(self.into()) } } impl ShapeWithOneHole for ((),) { fn into_shape(self, el_count: usize) -> Result<Shape> { Ok(el_count.into()) } } fn hole_size(el_count: usize, prod_d: usize, s: &dyn std::fmt::Debug) -> Result<usize> { if prod_d == 0 { crate::bail!("cannot reshape tensor of {el_count} elements to {s:?}") } if el_count % prod_d != 0 { crate::bail!("cannot reshape tensor with {el_count} elements to {s:?}") } Ok(el_count / prod_d) } impl ShapeWithOneHole for ((), usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let ((), d1) = self; Ok((hole_size(el_count, d1, &self)?, d1).into()) } } impl ShapeWithOneHole for (usize, ()) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, ()) = self; Ok((d1, hole_size(el_count, d1, &self)?).into()) } } impl ShapeWithOneHole for ((), usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let ((), d1, d2) = self; Ok((hole_size(el_count, d1 * d2, &self)?, d1, d2).into()) } } impl ShapeWithOneHole for (usize, (), usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, (), d2) = self; Ok((d1, hole_size(el_count, d1 * d2, &self)?, d2).into()) } } impl ShapeWithOneHole for (usize, usize, ()) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, ()) = self; Ok((d1, d2, hole_size(el_count, d1 * d2, &self)?).into()) } } impl ShapeWithOneHole for ((), usize, usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let ((), d1, d2, d3) = self; let d = hole_size(el_count, d1 * d2 * d3, &self)?; Ok((d, d1, d2, d3).into()) } } impl ShapeWithOneHole for (usize, (), usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, (), d2, d3) = self; let d = hole_size(el_count, d1 * d2 * d3, &self)?; Ok((d1, d, d2, d3).into()) } } impl ShapeWithOneHole for (usize, usize, (), usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, (), d3) = self; let d = hole_size(el_count, d1 * d2 * d3, &self)?; Ok((d1, d2, d, d3).into()) } } impl ShapeWithOneHole for (usize, usize, usize, ()) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, d3, ()) = self; let d = hole_size(el_count, d1 * d2 * d3, &self)?; Ok((d1, d2, d3, d).into()) } } impl ShapeWithOneHole for ((), usize, usize, usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let ((), d1, d2, d3, d4) = self; let d = hole_size(el_count, d1 * d2 * d3 * 
d4, &self)?; Ok((d, d1, d2, d3, d4).into()) } } impl ShapeWithOneHole for (usize, (), usize, usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, (), d2, d3, d4) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d1, d, d2, d3, d4).into()) } } impl ShapeWithOneHole for (usize, usize, (), usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, (), d3, d4) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d1, d2, d, d3, d4).into()) } } impl ShapeWithOneHole for (usize, usize, usize, (), usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, d3, (), d4) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d1, d2, d3, d, d4).into()) } } impl ShapeWithOneHole for (usize, usize, usize, usize, ()) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, d3, d4, ()) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d1, d2, d3, d4, d).into()) } } #[cfg(test)] mod tests { use super::*; #[test] fn stride() { let shape = Shape::from(()); assert_eq!(shape.stride_contiguous(), Vec::<usize>::new()); let shape = Shape::from(42); assert_eq!(shape.stride_contiguous(), [1]); let shape = Shape::from((42, 1337)); assert_eq!(shape.stride_contiguous(), [1337, 1]); let shape = Shape::from((299, 792, 458)); assert_eq!(shape.stride_contiguous(), [458 * 792, 458, 1]); } }
candle/candle-core/src/shape.rs/0
{ "file_path": "candle/candle-core/src/shape.rs", "repo_id": "candle", "token_count": 9929 }
19
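The `ShapeWithOneHole` impls are what let a reshape infer one dimension from the element count: a `()` in the target shape marks the hole and `hole_size` divides it out. A small usage sketch through the public `Tensor::reshape` API:

```rust
use candle_core::{Device, Result, Tensor};

fn main() -> Result<()> {
    let t = Tensor::arange(0u32, 24u32, &Device::Cpu)?;
    // `()` marks the hole: 24 elements / (2 * 4) = 3, so the result is (2, 3, 4).
    let t = t.reshape((2, (), 4))?;
    assert_eq!(t.dims(), &[2, 3, 4]);
    // hole_size errors out when the known dims do not divide the element
    // count, e.g. reshape((5, ())) on 24 elements fails with
    // "cannot reshape tensor with 24 elements to ...".
    Ok(())
}
```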
use candle::{test_device, Device, IndexOp, Result, Tensor}; use candle_core as candle; fn contiguous(device: &Device) -> Result<()> { let tensor = Tensor::arange(0u32, 24u32, device)?.reshape((2, 3, 4))?; assert_eq!( tensor.to_vec3::<u32>()?, &[ [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]] ] ); assert_eq!( tensor.t()?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]], [[12, 16, 20], [13, 17, 21], [14, 18, 22], [15, 19, 23]] ] ); assert_eq!( tensor.transpose(0, 1)?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 1, 2, 3], [12, 13, 14, 15]], [[4, 5, 6, 7], [16, 17, 18, 19]], [[8, 9, 10, 11], [20, 21, 22, 23]] ] ); assert_eq!( tensor.transpose(0, 1)?.flatten_all()?.to_vec1::<u32>()?, &[0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23] ); assert_eq!( tensor .i(1..)? .transpose(0, 1)? .contiguous()? .to_vec3::<u32>()?, &[[[12, 13, 14, 15]], [[16, 17, 18, 19]], [[20, 21, 22, 23]]] ); assert_eq!( tensor.transpose(0, 2)?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 12], [4, 16], [8, 20]], [[1, 13], [5, 17], [9, 21]], [[2, 14], [6, 18], [10, 22]], [[3, 15], [7, 19], [11, 23]] ] ); Ok(()) } test_device!(contiguous, contiguous_cpu, contiguous_gpu, contiguous_metal); #[test] fn strided_blocks() -> Result<()> { use candle::Device::Cpu; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 0); assert_eq!(len, 24); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 26u32, &Cpu)? .i(2..)? .reshape((2, 3, 4))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 2); assert_eq!(len, 24); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; let tensor = tensor.i(1)?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 12); assert_eq!(len, 12); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; let tensor = tensor.i((.., 1))?.contiguous()?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 0); assert_eq!(len, 8); assert_eq!(tensor.to_vec2::<u32>()?, &[[4, 5, 6, 7], [16, 17, 18, 19]]); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; let tensor = tensor.i((.., 1))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { .. } => { panic!("unexpected block structure") } candle::StridedBlocks::MultipleBlocks { block_len, block_start_index, } => { assert_eq!(block_len, 4); assert_eq!(block_start_index.collect::<Vec<_>>(), &[4, 16]) } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.t()?.strided_blocks() { candle::StridedBlocks::SingleBlock { .. 
} => { panic!("unexpected block structure") } candle::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { assert_eq!(block_len, 1); assert_eq!( block_start_index.collect::<Vec<_>>(), &[ 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23 ] ) } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.transpose(0, 1)?.strided_blocks() { candle::StridedBlocks::SingleBlock { .. } => { panic!("unexpected block structure") } candle::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { assert_eq!(block_len, 4); assert_eq!( block_start_index.collect::<Vec<_>>(), &[0, 12, 4, 16, 8, 20] ) } }; Ok(()) }
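// Note on the cases above: a contiguous (2, 3, 4) tensor has row-major
// strides [12, 4, 1]. `t()` swaps the two innermost dims, giving strides
// [12, 1, 4] where no run longer than one element stays contiguous (hence
// block_len 1), while `transpose(0, 1)` gives strides [4, 12, 1] where each
// row of 4 elements is still contiguous (hence block_len 4).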
candle/candle-core/tests/layout_tests.rs/0
{ "file_path": "candle/candle-core/tests/layout_tests.rs", "repo_id": "candle", "token_count": 2819 }
20
use hf_hub::{ api::sync::{Api, ApiRepo}, Repo, RepoType, }; use parquet::file::reader::SerializedFileReader; use std::fs::File; #[derive(thiserror::Error, Debug)] pub enum Error { #[error("ApiError : {0}")] ApiError(#[from] hf_hub::api::sync::ApiError), #[error("IoError : {0}")] IoError(#[from] std::io::Error), #[error("ParquetError : {0}")] ParquetError(#[from] parquet::errors::ParquetError), } fn sibling_to_parquet( rfilename: &str, repo: &ApiRepo, ) -> Result<SerializedFileReader<File>, Error> { let local = repo.get(rfilename)?; let file = File::open(local)?; let reader = SerializedFileReader::new(file)?; Ok(reader) } pub fn from_hub(api: &Api, dataset_id: String) -> Result<Vec<SerializedFileReader<File>>, Error> { let repo = Repo::with_revision( dataset_id, RepoType::Dataset, "refs/convert/parquet".to_string(), ); let repo = api.repo(repo); let info = repo.info()?; let files: Result<Vec<_>, _> = info .siblings .into_iter() .filter_map(|s| -> Option<Result<_, _>> { let filename = s.rfilename; if filename.ends_with(".parquet") { let reader_result = sibling_to_parquet(&filename, &repo); Some(reader_result) } else { None } }) .collect(); let files = files?; Ok(files) } #[cfg(test)] mod tests { use super::*; use parquet::file::reader::FileReader; #[test] fn test_dataset() { let api = Api::new().unwrap(); let files = from_hub( &api, "hf-internal-testing/dummy_image_text_data".to_string(), ) .unwrap(); assert_eq!(files.len(), 1); assert_eq!(files[0].metadata().file_metadata().num_rows(), 20); } }
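// A usage sketch, not part of this module's API (assumption: recent versions
// of the `parquet` crate yield `Result<Row>` items from `get_row_iter`):
#[allow(dead_code)]
fn preview(api: &Api, dataset_id: String) -> Result<(), Error> {
    use parquet::file::reader::FileReader;
    for reader in from_hub(api, dataset_id)? {
        // Print the first couple of rows of each parquet file.
        for row in reader.get_row_iter(None)?.take(2) {
            println!("{:?}", row?);
        }
    }
    Ok(())
}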
candle/candle-datasets/src/hub.rs/0
{ "file_path": "candle/candle-datasets/src/hub.rs", "repo_id": "candle", "token_count": 900 }
21
# candle-starcoder: code generation model

[StarCoder/BigCode](https://huggingface.co/bigcode/starcoderbase-1b) is an LLM specialized in code generation. The initial model was trained on 80 programming languages.

## Running an example

```bash
cargo run --example bigcode --release -- --prompt "fn fact(n: u64) -> u64 "

> fn fact(n: u64) -> u64 {
>     if n == 0 {
>         1
>     } else {
>         n * fact(n - 1)
>     }
> }
```
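The example is also expected to run on CPU via the usual candle `--cpu` flag (an assumption; check the flags defined in `main.rs`):

```bash
cargo run --example bigcode --release -- --cpu --prompt "fn fact(n: u64) -> u64 "
```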
candle/candle-examples/examples/bigcode/README.md/0
{ "file_path": "candle/candle-examples/examples/bigcode/README.md", "repo_id": "candle", "token_count": 180 }
22
# candle-depth-anything-v2

[Depth Anything V2](https://github.com/DepthAnything/Depth-Anything-V2) is a model for monocular depth estimation (MDE, i.e. depth prediction from a single image) which builds on the [DINOv2](https://github.com/facebookresearch/dinov2) vision transformer.

This example first instantiates the DINOv2 model and then proceeds to create DepthAnythingV2 and run it.

## Running an example with color map and CUDA

```bash
cargo run --features cuda,depth_anything_v2 --package candle-examples --example depth_anything_v2 -- --color-map --image candle-examples/examples/yolo-v8/assets/bike.jpg
```
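## Running without CUDA (sketch)

Assuming the example follows the usual candle CLI pattern with a `--cpu` flag (check `main.rs` for the exact flags), a CPU-only run would look like:

```bash
cargo run --features depth_anything_v2 --package candle-examples --example depth_anything_v2 -- --cpu --image candle-examples/examples/yolo-v8/assets/bike.jpg
```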
candle/candle-examples/examples/depth_anything_v2/README.md/0
{ "file_path": "candle/candle-examples/examples/depth_anything_v2/README.md", "repo_id": "candle", "token_count": 168 }
23
# hiera

[Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles](https://arxiv.org/abs/2306.00989)

This candle implementation uses pre-trained Hiera models from timm for inference.
The classification head has been trained on the ImageNet dataset and returns the
probabilities for the top-5 classes.

## Running an example

```
$ cargo run --example hiera --release -- --image candle-examples/examples/yolo-v8/assets/bike.jpg --which tiny
loaded image Tensor[dims 3, 224, 224; f32]
model built
mountain bike, all-terrain bike, off-roader: 71.15%
unicycle, monocycle                        : 7.11%
knee pad                                   : 4.26%
crash helmet                               : 1.48%
moped                                      : 1.07%
```
candle/candle-examples/examples/hiera/README.md/0
{ "file_path": "candle/candle-examples/examples/hiera/README.md", "repo_id": "candle", "token_count": 260 }
24
/// This follows the lines of: /// https://github.com/johnma2006/mamba-minimal/blob/master/model.py /// Simple, minimal implementation of Mamba in one file of PyTorch. use candle::{IndexOp, Module, Result, Tensor, D}; use candle_nn::{RmsNorm, VarBuilder}; use candle_transformers::models::with_tracing::{linear, linear_no_bias, Linear}; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { d_model: usize, n_layer: usize, vocab_size: usize, pad_vocab_size_multiple: usize, } impl Config { fn vocab_size(&self) -> usize { let pad = self.pad_vocab_size_multiple; (self.vocab_size + pad - 1) / pad * pad } fn dt_rank(&self) -> usize { (self.d_model + 15) / 16 } fn d_conv(&self) -> usize { 4 } fn d_state(&self) -> usize { 16 } fn d_inner(&self) -> usize { self.d_model * 2 } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L177 #[derive(Clone, Debug)] pub struct MambaBlock { in_proj: Linear, conv1d: candle_nn::Conv1d, x_proj: Linear, dt_proj: Linear, a_log: Tensor, d: Tensor, out_proj: Linear, dt_rank: usize, } impl MambaBlock { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let d_inner = cfg.d_inner(); let d_conv = cfg.d_conv(); let d_state = cfg.d_state(); let dt_rank = cfg.dt_rank(); let in_proj = linear_no_bias(cfg.d_model, d_inner * 2, vb.pp("in_proj"))?; let conv_cfg = candle_nn::Conv1dConfig { groups: d_inner, padding: d_conv - 1, ..Default::default() }; let conv1d = candle_nn::conv1d(d_inner, d_inner, d_conv, conv_cfg, vb.pp("conv1d"))?; let x_proj = linear_no_bias(d_inner, dt_rank + d_state * 2, vb.pp("x_proj"))?; let dt_proj = linear(dt_rank, d_inner, vb.pp("dt_proj"))?; let a_log = vb.get((d_inner, d_state), "A_log")?; let d = vb.get(d_inner, "D")?; let out_proj = linear_no_bias(d_inner, cfg.d_model, vb.pp("out_proj"))?; Ok(Self { in_proj, conv1d, x_proj, dt_proj, a_log, d, out_proj, dt_rank, }) } fn ssm(&self, xs: &Tensor) -> Result<Tensor> { let (_d_in, n) = self.a_log.dims2()?; let a = self.a_log.to_dtype(candle::DType::F32)?.exp()?.neg()?; let d = self.d.to_dtype(candle::DType::F32)?; let x_dbl = xs.apply(&self.x_proj)?; let delta = x_dbl.narrow(D::Minus1, 0, self.dt_rank)?; let b = x_dbl.narrow(D::Minus1, self.dt_rank, n)?; let c = x_dbl.narrow(D::Minus1, self.dt_rank + n, n)?; let delta = delta.contiguous()?.apply(&self.dt_proj)?; // softplus without threshold let delta = (delta.exp()? + 1.)?.log()?; let ss = selective_scan(xs, &delta, &a, &b, &c, &d)?; Ok(ss) } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L275 fn selective_scan( u: &Tensor, delta: &Tensor, a: &Tensor, b: &Tensor, c: &Tensor, d: &Tensor, ) -> Result<Tensor> { let (b_sz, l, d_in) = u.dims3()?; let n = a.dim(1)?; let delta = delta.t()?.reshape((b_sz, d_in, l, 1))?; // b d_in l 1 let delta_a = delta.broadcast_mul(&a.reshape((1, d_in, 1, n))?)?.exp()?; let delta_b_u = delta .broadcast_mul(&b.reshape((b_sz, 1, l, n))?)? .broadcast_mul(&u.t()?.reshape((b_sz, d_in, l, 1))?)?; let mut xs = Tensor::zeros((b_sz, d_in, n), delta_a.dtype(), delta_a.device())?; let mut ys = Vec::with_capacity(l); for i in 0..l { xs = ((delta_a.i((.., .., i))? * xs)? 
+ delta_b_u.i((.., .., i))?)?; let y = xs.matmul(&c.i((.., i, ..))?.unsqueeze(2)?)?.squeeze(2)?; ys.push(y) } let ys = Tensor::stack(ys.as_slice(), 1)?; ys + u.broadcast_mul(d) } impl Module for MambaBlock { // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L206 fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b_sz, seq_len, _dim) = xs.dims3()?; let xs_and_res = xs.apply(&self.in_proj)?.chunk(2, D::Minus1)?; let (xs, res) = (&xs_and_res[0], &xs_and_res[1]); let xs = xs .t()? .apply(&self.conv1d)? .narrow(D::Minus1, 0, seq_len)? .t()?; let xs = candle_nn::ops::silu(&xs)?; let ys = (self.ssm(&xs)? * candle_nn::ops::silu(res))?; ys.apply(&self.out_proj) } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L143 #[derive(Clone, Debug)] pub struct ResidualBlock { mixer: MambaBlock, norm: RmsNorm, } impl ResidualBlock { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let norm = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm"))?; let mixer = MambaBlock::new(cfg, vb.pp("mixer"))?; Ok(Self { mixer, norm }) } } impl Module for ResidualBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.norm)?.apply(&self.mixer)? + xs } } // https://github.com/johnma2006/mamba-minimal/blob/61f01953ca153f8c4a850d7111beecbf4be9cee1/model.py#L56 #[derive(Clone, Debug)] pub struct Model { embedding: candle_nn::Embedding, layers: Vec<ResidualBlock>, norm_f: RmsNorm, lm_head: Linear, } impl Model { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let embedding = candle_nn::embedding(cfg.vocab_size(), cfg.d_model, vb.pp("embedding"))?; let mut layers = Vec::with_capacity(cfg.n_layer); let vb_l = vb.pp("layers"); for layer_idx in 0..cfg.n_layer { let layer = ResidualBlock::new(cfg, vb_l.pp(layer_idx))?; layers.push(layer) } let norm_f = candle_nn::rms_norm(cfg.d_model, 1e-5, vb.pp("norm_f"))?; let lm_head = Linear::from_weights(embedding.embeddings().clone(), None); Ok(Self { embedding, layers, norm_f, lm_head, }) } } impl Module for Model { fn forward(&self, input_ids: &Tensor) -> Result<Tensor> { let (_b_size, seq_len) = input_ids.dims2()?; let mut xs = self.embedding.forward(input_ids)?; for layer in self.layers.iter() { xs = layer.forward(&xs)? } xs.narrow(1, seq_len - 1, 1)? .apply(&self.norm_f)? .apply(&self.lm_head) } }
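// The scan above is, in effect, the discretized SSM recurrence from the
// Mamba paper, evaluated per batch element and per channel:
//   x_t = exp(delta_t * A) . x_{t-1} + (delta_t * B_t) * u_t
//   y_t = C_t . x_t + D * u_t
// `delta_a` and `delta_b_u` precompute both per-step coefficients for every
// timestep up front, so only the state update itself runs in the sequential loop.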
candle/candle-examples/examples/mamba-minimal/model.rs/0
{ "file_path": "candle/candle-examples/examples/mamba-minimal/model.rs", "repo_id": "candle", "token_count": 3488 }
25
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::{Parser, ValueEnum}; use candle::{DType, IndexOp, D}; use candle_nn::{Module, VarBuilder}; use candle_transformers::models::mobilenetv4; #[derive(Clone, Copy, Debug, ValueEnum)] enum Which { Small, Medium, Large, HybridMedium, HybridLarge, } impl Which { fn model_filename(&self) -> String { let name = match self { Self::Small => "conv_small.e2400_r224", Self::Medium => "conv_medium.e500_r256", Self::HybridMedium => "hybrid_medium.ix_e550_r256", Self::Large => "conv_large.e600_r384", Self::HybridLarge => "hybrid_large.ix_e600_r384", }; format!("timm/mobilenetv4_{}_in1k", name) } fn resolution(&self) -> u32 { match self { Self::Small => 224, Self::Medium => 256, Self::HybridMedium => 256, Self::Large => 384, Self::HybridLarge => 384, } } fn config(&self) -> mobilenetv4::Config { match self { Self::Small => mobilenetv4::Config::small(), Self::Medium => mobilenetv4::Config::medium(), Self::HybridMedium => mobilenetv4::Config::hybrid_medium(), Self::Large => mobilenetv4::Config::large(), Self::HybridLarge => mobilenetv4::Config::hybrid_large(), } } } #[derive(Parser)] struct Args { #[arg(long)] model: Option<String>, #[arg(long)] image: String, /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, #[arg(value_enum, long, default_value_t=Which::Small)] which: Which, } pub fn main() -> anyhow::Result<()> { let args = Args::parse(); let device = candle_examples::device(args.cpu)?; let image = candle_examples::imagenet::load_image(args.image, args.which.resolution() as usize)? .to_device(&device)?; println!("loaded image {image:?}"); let model_file = match args.model { None => { let model_name = args.which.model_filename(); let api = hf_hub::api::sync::Api::new()?; let api = api.model(model_name); api.get("model.safetensors")? } Some(model) => model.into(), }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], DType::F32, &device)? }; let model = mobilenetv4::mobilenetv4(&args.which.config(), 1000, vb)?; println!("model built"); let logits = model.forward(&image.unsqueeze(0)?)?; let prs = candle_nn::ops::softmax(&logits, D::Minus1)? .i(0)? .to_vec1::<f32>()?; let mut prs = prs.iter().enumerate().collect::<Vec<_>>(); prs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for &(category_idx, pr) in prs.iter().take(5) { println!( "{:24}: {:.2}%", candle_examples::imagenet::CLASSES[category_idx], 100. * pr ); } Ok(()) }
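// A typical invocation (sketch; the exact flags are those defined in `Args`
// above, and the weights are pulled from the hub when `--model` is not given):
//
//   cargo run --example mobilenetv4 --release -- \
//     --which medium --image candle-examples/examples/yolo-v8/assets/bike.jpg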
candle/candle-examples/examples/mobilenetv4/main.rs/0
{ "file_path": "candle/candle-examples/examples/mobilenetv4/main.rs", "repo_id": "candle", "token_count": 1443 }
26
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;

#[cfg(feature = "accelerate")]
extern crate accelerate_src;

use anyhow::{Error as E, Result};
use clap::{Parser, ValueEnum};

use candle_examples::token_output_stream::TokenOutputStream;
use candle_transformers::models::mixformer::{Config, MixFormerSequentialForCausalLM as MixFormer};
use candle_transformers::models::phi::{Config as PhiConfig, Model as Phi};
use candle_transformers::models::phi3::{Config as Phi3Config, Model as Phi3};
use candle_transformers::models::quantized_mixformer::MixFormerSequentialForCausalLM as QMixFormer;

use candle::{DType, Device, IndexOp, Tensor};
use candle_nn::VarBuilder;
use candle_transformers::generation::LogitsProcessor;
use hf_hub::{api::sync::Api, Repo, RepoType};
use tokenizers::Tokenizer;

enum Model {
    MixFormer(MixFormer),
    Phi(Phi),
    Phi3(Phi3),
    Quantized(QMixFormer),
}

struct TextGeneration {
    model: Model,
    device: Device,
    tokenizer: TokenOutputStream,
    logits_processor: LogitsProcessor,
    repeat_penalty: f32,
    repeat_last_n: usize,
    verbose_prompt: bool,
}

impl TextGeneration {
    #[allow(clippy::too_many_arguments)]
    fn new(
        model: Model,
        tokenizer: Tokenizer,
        seed: u64,
        temp: Option<f64>,
        top_p: Option<f64>,
        repeat_penalty: f32,
        repeat_last_n: usize,
        verbose_prompt: bool,
        device: &Device,
    ) -> Self {
        let logits_processor = LogitsProcessor::new(seed, temp, top_p);
        Self {
            model,
            tokenizer: TokenOutputStream::new(tokenizer),
            logits_processor,
            repeat_penalty,
            repeat_last_n,
            verbose_prompt,
            device: device.clone(),
        }
    }

    fn run(&mut self, prompt: &str, sample_len: usize) -> Result<()> {
        use std::io::Write;
        println!("starting the inference loop");
        let tokens = self
            .tokenizer
            .tokenizer()
            .encode(prompt, true)
            .map_err(E::msg)?;
        if tokens.is_empty() {
            anyhow::bail!("Empty prompts are not supported in the phi model.")
        }
        if self.verbose_prompt {
            for (token, id) in tokens.get_tokens().iter().zip(tokens.get_ids().iter()) {
                let token = token.replace('▁', " ").replace("<0x0A>", "\n");
                println!("{id:7} -> '{token}'");
            }
        }
        let mut tokens = tokens.get_ids().to_vec();
        let mut generated_tokens = 0usize;
        let eos_token = match self.tokenizer.get_token("<|endoftext|>") {
            Some(token) => token,
            None => anyhow::bail!("cannot find the endoftext token"),
        };
        print!("{prompt}");
        std::io::stdout().flush()?;
        let start_gen = std::time::Instant::now();
        let mut pos = 0;
        for index in 0..sample_len {
            let context_size = if index > 0 { 1 } else { tokens.len() };
            let ctxt = &tokens[tokens.len().saturating_sub(context_size)..];
            let input = Tensor::new(ctxt, &self.device)?.unsqueeze(0)?;
            let logits = match &mut self.model {
                Model::MixFormer(m) => m.forward(&input)?,
                Model::Phi(m) => m.forward(&input)?,
                Model::Quantized(m) => m.forward(&input)?,
                Model::Phi3(m) => m.forward(&input, pos)?.i((.., 0, ..))?,
            };
            let logits = logits.squeeze(0)?.to_dtype(DType::F32)?;
            let logits = if self.repeat_penalty == 1. {
                logits
            } else {
                let start_at = tokens.len().saturating_sub(self.repeat_last_n);
                candle_transformers::utils::apply_repeat_penalty(
                    &logits,
                    self.repeat_penalty,
                    &tokens[start_at..],
                )?
            };
            let next_token = self.logits_processor.sample(&logits)?;
            tokens.push(next_token);
            generated_tokens += 1;
            if next_token == eos_token {
                if let Some(t) = self.tokenizer.decode_rest()? {
                    print!("{t}");
                    std::io::stdout().flush()?;
                }
                break;
            }
            if let Some(t) = self.tokenizer.next_token(next_token)?
{ print!("{t}"); std::io::stdout().flush()?; } pos += context_size; } let dt = start_gen.elapsed(); println!( "\n{generated_tokens} tokens generated ({:.2} token/s)", generated_tokens as f64 / dt.as_secs_f64(), ); Ok(()) } } #[derive(Clone, Copy, Debug, ValueEnum, PartialEq, Eq)] enum WhichModel { #[value(name = "1")] V1, #[value(name = "1.5")] V1_5, #[value(name = "2")] V2, #[value(name = "3")] V3, #[value(name = "3-medium")] V3Medium, #[value(name = "2-old")] V2Old, PuffinPhiV2, PhiHermes, } #[derive(Parser, Debug)] #[command(author, version, about, long_about = None)] struct Args { /// Run on CPU rather than on GPU. #[arg(long)] cpu: bool, /// Enable tracing (generates a trace-timestamp.json file). #[arg(long)] tracing: bool, /// Display the token for the specified prompt. #[arg(long)] verbose_prompt: bool, #[arg(long)] prompt: Option<String>, #[arg(long)] mmlu_dir: Option<String>, /// The temperature used to generate samples. #[arg(long)] temperature: Option<f64>, /// Nucleus sampling probability cutoff. #[arg(long)] top_p: Option<f64>, /// The seed to use when generating random samples. #[arg(long, default_value_t = 299792458)] seed: u64, /// The length of the sample to generate (in tokens). #[arg(long, short = 'n', default_value_t = 5000)] sample_len: usize, #[arg(long)] model_id: Option<String>, #[arg(long, default_value = "2")] model: WhichModel, #[arg(long)] revision: Option<String>, #[arg(long)] weight_file: Option<String>, #[arg(long)] tokenizer: Option<String>, #[arg(long)] quantized: bool, /// Penalty to be applied for repeating tokens, 1. means no penalty. #[arg(long, default_value_t = 1.1)] repeat_penalty: f32, /// The context size to consider for the repeat penalty. #[arg(long, default_value_t = 64)] repeat_last_n: usize, /// The dtype to be used for running the model, e.g. f32, bf16, or f16. 
#[arg(long)] dtype: Option<String>, } fn main() -> Result<()> { use tracing_chrome::ChromeLayerBuilder; use tracing_subscriber::prelude::*; let args = Args::parse(); let _guard = if args.tracing { let (chrome_layer, guard) = ChromeLayerBuilder::new().build(); tracing_subscriber::registry().with(chrome_layer).init(); Some(guard) } else { None }; println!( "avx: {}, neon: {}, simd128: {}, f16c: {}", candle::utils::with_avx(), candle::utils::with_neon(), candle::utils::with_simd128(), candle::utils::with_f16c() ); println!( "temp: {:.2} repeat-penalty: {:.2} repeat-last-n: {}", args.temperature.unwrap_or(0.), args.repeat_penalty, args.repeat_last_n ); let start = std::time::Instant::now(); let api = Api::new()?; let model_id = match args.model_id { Some(model_id) => model_id.to_string(), None => { if args.quantized { "lmz/candle-quantized-phi".to_string() } else { match args.model { WhichModel::V1 => "microsoft/phi-1".to_string(), WhichModel::V1_5 => "microsoft/phi-1_5".to_string(), WhichModel::V2 | WhichModel::V2Old => "microsoft/phi-2".to_string(), WhichModel::V3 => "microsoft/Phi-3-mini-4k-instruct".to_string(), WhichModel::V3Medium => "microsoft/Phi-3-medium-4k-instruct".to_string(), WhichModel::PuffinPhiV2 | WhichModel::PhiHermes => { "lmz/candle-quantized-phi".to_string() } } } } }; let revision = match args.revision { Some(rev) => rev.to_string(), None => { if args.quantized { "main".to_string() } else { match args.model { WhichModel::V1 => "refs/pr/8".to_string(), WhichModel::V1_5 => "refs/pr/73".to_string(), WhichModel::V2Old => "834565c23f9b28b96ccbeabe614dd906b6db551a".to_string(), WhichModel::V2 | WhichModel::V3 | WhichModel::V3Medium | WhichModel::PuffinPhiV2 | WhichModel::PhiHermes => "main".to_string(), } } } }; let repo = api.repo(Repo::with_revision(model_id, RepoType::Model, revision)); let tokenizer_filename = match args.tokenizer { Some(file) => std::path::PathBuf::from(file), None => match args.model { WhichModel::V1 | WhichModel::V1_5 | WhichModel::V2 | WhichModel::V2Old | WhichModel::V3 | WhichModel::V3Medium => repo.get("tokenizer.json")?, WhichModel::PuffinPhiV2 | WhichModel::PhiHermes => { repo.get("tokenizer-puffin-phi-v2.json")? } }, }; let filenames = match args.weight_file { Some(weight_file) => vec![std::path::PathBuf::from(weight_file)], None => { if args.quantized { match args.model { WhichModel::V1 => vec![repo.get("model-v1-q4k.gguf")?], WhichModel::V1_5 => vec![repo.get("model-q4k.gguf")?], WhichModel::V2 | WhichModel::V2Old => vec![repo.get("model-v2-q4k.gguf")?], WhichModel::PuffinPhiV2 => vec![repo.get("model-puffin-phi-v2-q4k.gguf")?], WhichModel::PhiHermes => vec![repo.get("model-phi-hermes-1_3B-q4k.gguf")?], WhichModel::V3 | WhichModel::V3Medium => anyhow::bail!( "use the quantized or quantized-phi examples for quantized phi-v3" ), } } else { match args.model { WhichModel::V1 | WhichModel::V1_5 => vec![repo.get("model.safetensors")?], WhichModel::V2 | WhichModel::V2Old | WhichModel::V3 | WhichModel::V3Medium => { candle_examples::hub_load_safetensors( &repo, "model.safetensors.index.json", )? 
} WhichModel::PuffinPhiV2 => vec![repo.get("model-puffin-phi-v2.safetensors")?], WhichModel::PhiHermes => vec![repo.get("model-phi-hermes-1_3B.safetensors")?], } } } }; println!("retrieved the files in {:?}", start.elapsed()); let tokenizer = Tokenizer::from_file(tokenizer_filename).map_err(E::msg)?; let start = std::time::Instant::now(); let config = || match args.model { WhichModel::V1 => Config::v1(), WhichModel::V1_5 => Config::v1_5(), WhichModel::V2 | WhichModel::V2Old => Config::v2(), WhichModel::PuffinPhiV2 => Config::puffin_phi_v2(), WhichModel::PhiHermes => Config::phi_hermes_1_3b(), WhichModel::V3 | WhichModel::V3Medium => { panic!("use the quantized or quantized-phi examples for quantized phi-v3") } }; let device = candle_examples::device(args.cpu)?; let model = if args.quantized { let config = config(); let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf( &filenames[0], &device, )?; let model = match args.model { WhichModel::V2 | WhichModel::V2Old => QMixFormer::new_v2(&config, vb)?, _ => QMixFormer::new(&config, vb)?, }; Model::Quantized(model) } else { let dtype = match args.dtype { Some(dtype) => std::str::FromStr::from_str(&dtype)?, None => { if args.model == WhichModel::V3 || args.model == WhichModel::V3Medium { device.bf16_default_to_f32() } else { DType::F32 } } }; let vb = unsafe { VarBuilder::from_mmaped_safetensors(&filenames, dtype, &device)? }; match args.model { WhichModel::V1 | WhichModel::V1_5 | WhichModel::V2 => { let config_filename = repo.get("config.json")?; let config = std::fs::read_to_string(config_filename)?; let config: PhiConfig = serde_json::from_str(&config)?; let phi = Phi::new(&config, vb)?; Model::Phi(phi) } WhichModel::V3 | WhichModel::V3Medium => { let config_filename = repo.get("config.json")?; let config = std::fs::read_to_string(config_filename)?; let config: Phi3Config = serde_json::from_str(&config)?; let phi3 = Phi3::new(&config, vb)?; Model::Phi3(phi3) } WhichModel::V2Old => { let config = config(); Model::MixFormer(MixFormer::new_v2(&config, vb)?) } WhichModel::PhiHermes | WhichModel::PuffinPhiV2 => { let config = config(); Model::MixFormer(MixFormer::new(&config, vb)?) 
} } }; println!("loaded the model in {:?}", start.elapsed()); match (args.prompt, args.mmlu_dir) { (None, None) | (Some(_), Some(_)) => { anyhow::bail!("exactly one of --prompt and --mmlu-dir must be specified") } (Some(prompt), None) => { let mut pipeline = TextGeneration::new( model, tokenizer, args.seed, args.temperature, args.top_p, args.repeat_penalty, args.repeat_last_n, args.verbose_prompt, &device, ); pipeline.run(&prompt, args.sample_len)?; } (None, Some(mmlu_dir)) => mmlu(model, tokenizer, &device, mmlu_dir)?, } Ok(()) } fn mmlu<P: AsRef<std::path::Path>>( mut model: Model, tokenizer: Tokenizer, device: &Device, mmlu_dir: P, ) -> anyhow::Result<()> { for dir_entry in mmlu_dir.as_ref().read_dir()?.flatten() { let dir_entry = dir_entry.path(); let theme = match dir_entry.file_stem().and_then(|v| v.to_str()) { None => "".to_string(), Some(v) => match v.strip_suffix("_test") { None => v.replace('_', " "), Some(v) => v.replace('_', " "), }, }; if dir_entry.extension().as_ref().and_then(|v| v.to_str()) != Some("csv") { continue; } println!("reading {dir_entry:?}"); let dir_entry = std::fs::File::open(dir_entry)?; let mut reader = csv::ReaderBuilder::new() .has_headers(false) .from_reader(dir_entry); let token_a = tokenizer.token_to_id("A").unwrap(); let token_b = tokenizer.token_to_id("B").unwrap(); let token_c = tokenizer.token_to_id("C").unwrap(); let token_d = tokenizer.token_to_id("D").unwrap(); for row in reader.records() { let row = match row { Err(_) => continue, Ok(row) => row, }; if row.len() < 5 { continue; } let question = row.get(0).unwrap(); let answer_a = row.get(1).unwrap(); let answer_b = row.get(2).unwrap(); let answer_c = row.get(3).unwrap(); let answer_d = row.get(4).unwrap(); let answer = row.get(5).unwrap(); let prompt = format!( "{} {theme}.\n{question}\nA. {answer_a}\nB. {answer_b}\nC. {answer_c}\nD. {answer_d}\nAnswer:\n", "The following are multiple choice questions (with answers) about" ); let tokens = tokenizer.encode(prompt.as_str(), true).map_err(E::msg)?; let tokens = tokens.get_ids().to_vec(); let input = Tensor::new(tokens, device)?.unsqueeze(0)?; let logits = match &mut model { Model::MixFormer(m) => { m.clear_kv_cache(); m.forward(&input)? } Model::Phi(m) => { m.clear_kv_cache(); m.forward(&input)? } Model::Phi3(m) => { m.clear_kv_cache(); m.forward(&input, 0)? } Model::Quantized(m) => { m.clear_kv_cache(); m.forward(&input)? } }; let logits = logits.squeeze(0)?.to_dtype(DType::F32)?; let logits_v: Vec<f32> = logits.to_vec1()?; let pr_a = logits_v[token_a as usize]; let pr_b = logits_v[token_b as usize]; let pr_c = logits_v[token_c as usize]; let pr_d = logits_v[token_d as usize]; let model_answer = if pr_a > pr_b && pr_a > pr_c && pr_a > pr_d { "A" } else if pr_b > pr_c && pr_b > pr_d { "B" } else if pr_c > pr_d { "C" } else { "D" }; println!("{prompt}\n -> {model_answer} vs {answer}"); } } Ok(()) }
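// Note on the generation loop: after the first step only the most recent
// token is fed back (`context_size` drops to 1) because the models keep a KV
// cache across calls, with `pos` tracking the absolute position required by
// the phi-3 forward pass; the MMLU path clears that cache before scoring each
// question and simply compares the logits of the A/B/C/D answer tokens.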
candle/candle-examples/examples/phi/main.rs/0
{ "file_path": "candle/candle-examples/examples/phi/main.rs", "repo_id": "candle", "token_count": 9478 }
27
use std::collections::VecDeque;

use rand::distributions::Uniform;
use rand::{thread_rng, Rng};

use candle::{DType, Device, Module, Result, Tensor};
use candle_nn::loss::mse;
use candle_nn::{linear, seq, Activation, AdamW, Optimizer, VarBuilder, VarMap};

use crate::gym_env::GymEnv;

const DEVICE: Device = Device::Cpu;
const EPISODES: usize = 200;
const BATCH_SIZE: usize = 64;
const GAMMA: f64 = 0.99;
const LEARNING_RATE: f64 = 0.01;

pub fn run() -> Result<()> {
    let env = GymEnv::new("CartPole-v1")?;

    // Build the model that predicts the estimated rewards given a specific state.
    let var_map = VarMap::new();
    let vb = VarBuilder::from_varmap(&var_map, DType::F32, &DEVICE);
    let observation_space = *env.observation_space().first().unwrap();

    let model = seq()
        .add(linear(observation_space, 64, vb.pp("linear_in"))?)
        .add(Activation::Relu)
        .add(linear(64, env.action_space(), vb.pp("linear_out"))?);

    let mut optimizer = AdamW::new_lr(var_map.all_vars(), LEARNING_RATE)?;

    // Initialize the model's memory.
    let mut memory = VecDeque::with_capacity(10000);

    // Start the training loop.
    let mut state = env.reset(0)?;
    let mut episode = 0;
    let mut accumulate_rewards = 0.0;
    while episode < EPISODES {
        // Given the current state, predict the estimated rewards, and take the
        // action that is expected to return the most rewards.
        let estimated_rewards = model.forward(&state.unsqueeze(0)?)?;
        let action: u32 = estimated_rewards.squeeze(0)?.argmax(0)?.to_scalar()?;

        // Take that action in the environment, and memorize the outcome:
        // - the state for which the action was taken
        // - the action taken
        // - the new state resulting from taking that action
        // - the actual reward of taking that action
        // - whether the environment reached a terminal state or not (e.g. game over)
        let step = env.step(action)?;
        accumulate_rewards += step.reward;
        memory.push_back((
            state,
            action,
            step.state.clone(),
            step.reward,
            step.terminated || step.truncated,
        ));
        state = step.state;

        // If there are enough entries in the memory, perform a learning step, where
        // BATCH_SIZE transitions will be sampled from the memory and will be
        // fed to the model so that it performs a backward pass.
        if memory.len() > BATCH_SIZE {
            // Sample randomly from the memory.
            let batch = thread_rng()
                .sample_iter(Uniform::from(0..memory.len()))
                .take(BATCH_SIZE)
                .map(|i| memory.get(i).unwrap().clone())
                .collect::<Vec<_>>();

            // Group all the samples together into tensors with the appropriate shape.
            let states: Vec<_> = batch.iter().map(|e| e.0.clone()).collect();
            let states = Tensor::stack(&states, 0)?;

            let actions = batch.iter().map(|e| e.1);
            let actions = Tensor::from_iter(actions, &DEVICE)?.unsqueeze(1)?;

            let next_states: Vec<_> = batch.iter().map(|e| e.2.clone()).collect();
            let next_states = Tensor::stack(&next_states, 0)?;

            let rewards = batch.iter().map(|e| e.3 as f32);
            let rewards = Tensor::from_iter(rewards, &DEVICE)?.unsqueeze(1)?;

            let non_final_mask = batch.iter().map(|e| !e.4 as u8 as f32);
            let non_final_mask = Tensor::from_iter(non_final_mask, &DEVICE)?.unsqueeze(1)?;

            // Get the estimated rewards for the actions that were taken at each step.
            let estimated_rewards = model.forward(&states)?;
            let x = estimated_rewards.gather(&actions, 1)?;

            // Get the maximum expected rewards for the next state, apply the discount
            // rate GAMMA to them, and add them to the rewards that were actually
            // gathered on the current state.
            // If the next state is a terminal state, just omit the maximum estimated
            // rewards for that state.
            let expected_rewards = model.forward(&next_states)?.detach();
            let y = expected_rewards.max_keepdim(1)?;
            let y = (y * GAMMA * non_final_mask + rewards)?;

            // Compare the estimated rewards with the maximum expected rewards and
            // perform the backward step.
            let loss = mse(&x, &y)?;
            optimizer.backward_step(&loss)?;
        }

        // If we are on a terminal state, reset the environment and log how it went.
        if step.terminated || step.truncated {
            episode += 1;
            println!("Episode {episode} | Rewards {}", accumulate_rewards as i64);
            state = env.reset(0)?;
            accumulate_rewards = 0.0;
        }
    }

    Ok(())
}
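// The target computed above is the standard Q-learning bootstrap estimate
//   y = r + GAMMA * max_a' Q(s', a')
// with the bootstrap term masked out on terminal transitions, trained
// against Q(s, a) through the MSE loss.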
candle/candle-examples/examples/reinforcement-learning/dqn.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/dqn.rs", "repo_id": "candle", "token_count": 2032 }
28
use candle::Device;
use candle::Module;
use candle_nn::VarBuilder;
use candle_transformers::models::segformer::{
    Config, ImageClassificationModel, SemanticSegmentationModel,
};
use clap::{Args, Parser, Subcommand};
use imageproc::image::Rgb;
use imageproc::integral_image::ArrayData;
use std::collections::HashMap;
use std::path::PathBuf;

#[derive(Parser)]
#[clap(about, version, long_about = None)]
struct CliArgs {
    #[arg(long, help = "use cpu")]
    cpu: bool,
    #[command(subcommand)]
    command: Commands,
}

#[derive(Args, Debug)]
struct SegmentationArgs {
    #[arg(
        long,
        help = "name of the huggingface hub model",
        default_value = "nvidia/segformer-b0-finetuned-ade-512-512"
    )]
    model_name: String,
    #[arg(
        long,
        help = "path to the label file in json format",
        default_value = "candle-examples/examples/segformer/assets/labels.json"
    )]
    label_path: PathBuf,
    #[arg(long, help = "path for the output mask image")]
    output_path: PathBuf,
    #[arg(help = "path to image as input")]
    image: PathBuf,
}

#[derive(Args, Debug)]
struct ClassificationArgs {
    #[arg(
        long,
        help = "name of the huggingface hub model",
        default_value = "paolinox/segformer-finetuned-food101"
    )]
    model_name: String,
    #[arg(help = "path to image as input")]
    image: PathBuf,
}

#[derive(Subcommand, Debug)]
enum Commands {
    Segment(SegmentationArgs),
    Classify(ClassificationArgs),
}

fn get_vb_and_config(model_name: String, device: &Device) -> anyhow::Result<(VarBuilder, Config)> {
    println!("loading model {} via huggingface hub", model_name);
    let api = hf_hub::api::sync::Api::new()?;
    let api = api.model(model_name.clone());
    let model_file = api.get("model.safetensors")?;
    println!("model {} downloaded and loaded", model_name);
    let vb =
        unsafe { VarBuilder::from_mmaped_safetensors(&[model_file], candle::DType::F32, device)? };
    let config = std::fs::read_to_string(api.get("config.json")?)?;
    let config: Config = serde_json::from_str(&config)?;
    println!("{:?}", config);
    Ok((vb, config))
}

#[derive(Debug, serde::Deserialize)]
struct LabelItem {
    index: u32,
    color: String,
}

fn segmentation_task(args: SegmentationArgs, device: &Device) -> anyhow::Result<()> {
    let label_file = std::fs::read_to_string(&args.label_path)?;
    let label_items: Vec<LabelItem> = serde_json::from_str(&label_file)?;
    let label_colors: HashMap<u32, Rgb<u8>> = label_items
        .iter()
        .map(|x| {
            (x.index - 1, {
                let color = x.color.trim_start_matches('#');
                let r = u8::from_str_radix(&color[0..2], 16).unwrap();
                let g = u8::from_str_radix(&color[2..4], 16).unwrap();
                let b = u8::from_str_radix(&color[4..6], 16).unwrap();
                Rgb([r, g, b])
            })
        })
        .collect();
    let image = candle_examples::imagenet::load_image224(args.image)?
        .unsqueeze(0)?
        .to_device(device)?;
    let (vb, config) = get_vb_and_config(args.model_name, device)?;
    let num_labels = label_items.len();
    let model = SemanticSegmentationModel::new(&config, num_labels, vb)?;
    let segmentations = model.forward(&image)?;

    // generate a mask image
    let mask = &segmentations.squeeze(0)?.argmax(0)?;
    let (h, w) = mask.dims2()?;
    let mask = mask.flatten_all()?.to_vec1::<u32>()?;
    let mask = mask
        .iter()
        .flat_map(|x| label_colors[x].data())
        .collect::<Vec<u8>>();
    let mask: image::ImageBuffer<image::Rgb<u8>, Vec<u8>> =
        image::ImageBuffer::from_raw(w as u32, h as u32, mask).unwrap();
    // resize
    let mask = image::DynamicImage::from(mask);
    let mask = mask.resize_to_fill(
        w as u32 * 4,
        h as u32 * 4,
        image::imageops::FilterType::CatmullRom,
    );
    mask.save(args.output_path.clone())?;
    println!("mask image saved to {:?}", args.output_path);
    Ok(())
}

fn classification_task(args: ClassificationArgs, device: &Device) -> anyhow::Result<()> {
    let image = candle_examples::imagenet::load_image224(args.image)?
        .unsqueeze(0)?
        .to_device(device)?;
    let (vb, config) = get_vb_and_config(args.model_name, device)?;
    let num_labels = 7;
    let model = ImageClassificationModel::new(&config, num_labels, vb)?;
    let classification = model.forward(&image)?;
    let classification = candle_nn::ops::softmax_last_dim(&classification)?;
    let classification = classification.squeeze(0)?;
    println!(
        "classification logits {:?}",
        classification.to_vec1::<f32>()?
    );
    let label_id = classification.argmax(0)?.to_scalar::<u32>()?;
    let label_id = format!("{}", label_id);
    println!("label: {}", config.id2label[&label_id]);
    Ok(())
}

pub fn main() -> anyhow::Result<()> {
    let args = CliArgs::parse();
    let device = candle_examples::device(args.cpu)?;
    if let Commands::Segment(args) = args.command {
        segmentation_task(args, &device)?
    } else if let Commands::Classify(args) = args.command {
        classification_task(args, &device)?
    }
    Ok(())
}
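// Note on the mask construction above: every pixel gets the argmax over the
// per-class logits, which is then mapped to the RGB color parsed from the
// label file; the 4x resize compensates for the model emitting logits at a
// quarter of the input resolution.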
candle/candle-examples/examples/segformer/main.rs/0
{ "file_path": "candle/candle-examples/examples/segformer/main.rs", "repo_id": "candle", "token_count": 2229 }
29
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include <cuda.h> #include <vector> // #ifdef OLD_GENERATOR_PATH // #include <ATen/CUDAGeneratorImpl.h> // #else // #include <ATen/cuda/CUDAGeneratorImpl.h> // #endif // // #include <ATen/cuda/CUDAGraphsUtils.cuh> // For at::cuda::philox::unpack constexpr int TOTAL_DIM = 0; constexpr int H_DIM = 1; constexpr int D_DIM = 2; //////////////////////////////////////////////////////////////////////////////////////////////////// struct Qkv_params { using index_t = int64_t; // The QKV matrices. void *__restrict__ q_ptr; void *__restrict__ k_ptr; void *__restrict__ v_ptr; // The stride between rows of the Q, K and V matrices. index_t q_batch_stride; index_t k_batch_stride; index_t v_batch_stride; index_t q_row_stride; index_t k_row_stride; index_t v_row_stride; index_t q_head_stride; index_t k_head_stride; index_t v_head_stride; // The number of heads. int h, h_k; // In the case of multi-query and grouped-query attention (MQA/GQA), nheads_k could be // different from nheads (query). int h_h_k_ratio; // precompute h / h_k, }; //////////////////////////////////////////////////////////////////////////////////////////////////// struct Flash_fwd_params : public Qkv_params { // The O matrix (output). void * __restrict__ o_ptr; void * __restrict__ oaccum_ptr; // The stride between rows of O. index_t o_batch_stride; index_t o_row_stride; index_t o_head_stride; // The pointer to the P matrix. void * __restrict__ p_ptr; // The pointer to the softmax sum. void * __restrict__ softmax_lse_ptr; void * __restrict__ softmax_lseaccum_ptr; // The dimensions. int b, seqlen_q, seqlen_k, seqlen_knew, d, seqlen_q_rounded, seqlen_k_rounded, d_rounded, rotary_dim, total_q; // The scaling factors for the kernel. float scale_softmax; float scale_softmax_log2; // array of length b+1 holding starting offset of each sequence. int * __restrict__ cu_seqlens_q; int * __restrict__ cu_seqlens_k; // If provided, the actual length of each k sequence. int * __restrict__ seqused_k; int *__restrict__ blockmask; // The K_new and V_new matrices. void * __restrict__ knew_ptr; void * __restrict__ vnew_ptr; // The stride between rows of the Q, K and V matrices. index_t knew_batch_stride; index_t vnew_batch_stride; index_t knew_row_stride; index_t vnew_row_stride; index_t knew_head_stride; index_t vnew_head_stride; // The cos and sin matrices for rotary embedding. void * __restrict__ rotary_cos_ptr; void * __restrict__ rotary_sin_ptr; // The indices to index into the KV cache. int * __restrict__ cache_batch_idx; // Paged KV cache int * __restrict__ block_table; index_t block_table_batch_stride; int page_block_size; // The dropout probability (probability of keeping an activation). float p_dropout; // uint32_t p_dropout_in_uint; // uint16_t p_dropout_in_uint16_t; uint8_t p_dropout_in_uint8_t; // Scale factor of 1 / (1 - p_dropout). float rp_dropout; float scale_softmax_rp_dropout; // Local window size int window_size_left, window_size_right; float softcap; // Random state. // at::PhiloxCudaState philox_args; // Pointer to the RNG seed (idx 0) and offset (idx 1). uint64_t * rng_state; bool is_bf16; bool is_causal; // If is_seqlens_k_cumulative, then seqlen_k is cu_seqlens_k[bidb + 1] - cu_seqlens_k[bidb]. // Otherwise it's cu_seqlens_k[bidb], i.e., we use cu_seqlens_k to store the sequence lengths of K. 
bool is_seqlens_k_cumulative; bool is_rotary_interleaved; int num_splits; // For split-KV version void * __restrict__ alibi_slopes_ptr; index_t alibi_slopes_batch_stride; bool unpadded_lse; // For varlen paths: LSE is in [nheads, total_seqlen_q] format instead of [b, nheads, seqlen_q]. bool seqlenq_ngroups_swapped; // q has been transposed from (b, 1, (nheads_kv ngroups), d) to (b, ngroups, nheads_kv, d). }; //////////////////////////////////////////////////////////////////////////////////////////////////// struct Flash_bwd_params : public Flash_fwd_params { // The dO and dQKV matrices. void *__restrict__ do_ptr; void *__restrict__ dq_ptr; void *__restrict__ dk_ptr; void *__restrict__ dv_ptr; // To accumulate dQ void *__restrict__ dq_accum_ptr; void *__restrict__ dk_accum_ptr; void *__restrict__ dv_accum_ptr; // // To accumulate dK and dV in case we're splitting the bwd along seqlen_q // dimension void *__restrict__ dk_accum_ptr; void *__restrict__ // dv_accum_ptr; // The stride between rows of the dO, dQ, dK and dV matrices. // TD [2022-04-16]: We're using 32-bit indexing to save registers. // The code probably won't work for arrays larger than 2GB. index_t do_batch_stride; index_t do_row_stride; index_t do_head_stride; index_t dq_batch_stride; index_t dk_batch_stride; index_t dv_batch_stride; index_t dq_row_stride; index_t dk_row_stride; index_t dv_row_stride; index_t dq_head_stride; index_t dk_head_stride; index_t dv_head_stride; // The pointer to the softmax d sum. void *__restrict__ dsoftmax_sum; bool deterministic; index_t dq_accum_split_stride; }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T, int Headdim, bool Is_causal> void run_mha_fwd_(Flash_fwd_params &params, cudaStream_t stream); template<typename T, int Headdim, bool Is_causal> void run_mha_fwd_splitkv_dispatch(Flash_fwd_params &params, cudaStream_t stream); template<typename T, int Headdim> void run_mha_bwd_(Flash_bwd_params &params, cudaStream_t stream);
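// Note on h_h_k_ratio above: with multi-query / grouped-query attention,
// query head i reads key/value head i / h_h_k_ratio, so h is expected to be
// a multiple of h_k and the ratio is precomputed once.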
candle/candle-flash-attn/kernels/flash.h/0
{ "file_path": "candle/candle-flash-attn/kernels/flash.h", "repo_id": "candle", "token_count": 2364 }
30
use anyhow::Result; use candle::{DType, Device, IndexOp, Tensor, D}; fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> { let b = 10f32.powi(digits); let t = t.to_vec3::<f32>()?; let t = t .iter() .map(|t| { t.iter() .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) .collect() }) .collect(); Ok(t) } fn fa_acausal(q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32) -> Result<Tensor> { let in_dtype = q.dtype(); let q = q.to_dtype(DType::F32)?; let k = k.to_dtype(DType::F32)?; let v = v.to_dtype(DType::F32)?; let att = (q.matmul(&k.t()?)? * softmax_scale as f64)?; let att = candle_nn::ops::softmax(&att, D::Minus1)?; // Convert to contiguous as matmul doesn't support strided vs for now. let output = att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?; Ok(output) } #[test] fn flash_attn_acausal() -> Result<()> { let device = Device::new_cuda(0)?; let q = Tensor::arange(0u32, 48, &device)? .to_dtype(DType::F16)? .reshape((1, 3, 2, 8))?; let k = (&q / 40.)?; let v = (&q / 50.)?; let q = (&q / 30.)?; let ys1 = fa_acausal(&q, &k, &v, 0.5)?; let ys1 = ys1.i(0)?.to_dtype(DType::F32)?; let ys2 = { let q = q.transpose(1, 2)?; let k = k.transpose(1, 2)?; let v = v.transpose(1, 2)?; candle_flash_attn::flash_attn(&q, &k, &v, 0.5, false)?.transpose(1, 2)? }; let ys2 = ys2.i(0)?.to_dtype(DType::F32)?; let diff = ys1.sub(&ys2)?.abs()?.flatten_all()?.max(0)?; assert_eq!(ys1.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys1, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); assert_eq!(ys2.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys2, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); assert!(diff.to_vec0::<f32>()?.abs() < 1e-5); Ok(()) } #[test] fn flash_attn_varlen() -> Result<()> { let device = Device::new_cuda(0)?; let q = Tensor::arange(0u32, 48, &device)? .to_dtype(DType::F16)? .reshape((3, 2, 8))?; let k = (&q / 40.)?; let v = (&q / 50.)?; let q = (&q / 30.)?; let seqlens_q = Tensor::new(&[0u32, 2u32], &device)?; let seqlens_k = Tensor::new(&[0u32, 2u32], &device)?; let ys = { let q = q.transpose(0, 1)?; let k = k.transpose(0, 1)?; let v = v.transpose(0, 1)?; candle_flash_attn::flash_attn_varlen( &q, &k, &v, &seqlens_q, &seqlens_k, 32, 32, 0.5, false, )? .transpose(0, 1)? }; let ys = ys.to_dtype(DType::F32)?; assert_eq!(ys.dims(), &[3, 2, 8]); assert_eq!( to_vec3_round(ys, 4)?, &[ [ [0.0837, 0.1038, 0.1238, 0.1438, 0.1637, 0.1837, 0.2037, 0.2238], [0.0922, 0.1122, 0.1322, 0.1522, 0.1721, 0.1921, 0.2122, 0.2322] ], [ [0.4204, 0.4404, 0.4604, 0.4805, 0.5005, 0.5205, 0.5405, 0.5605], [0.428, 0.448, 0.468, 0.488, 0.5083, 0.5283, 0.5483, 0.5684] ], [ [0.7554, 0.7754, 0.7954, 0.8154, 0.8354, 0.8555, 0.8755, 0.8955], [0.7622, 0.7822, 0.8022, 0.8223, 0.8423, 0.8623, 0.8823, 0.9023] ] ] ); Ok(()) }
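// Note on the layouts above: candle's flash_attn entry points take tensors
// in (batch, seqlen, nheads, headdim) order, hence the transposes around
// each call, while `fa_acausal` is the plain softmax(q k^T * scale) v
// reference computed in f32 that the fused kernel output is checked against.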
candle/candle-flash-attn/tests/flash_attn_tests.rs/0
{ "file_path": "candle/candle-flash-attn/tests/flash_attn_tests.rs", "repo_id": "candle", "token_count": 2787 }
31
// Adapted from https://github.com/ggerganov/llama.cpp/blob/master/ggml-cuda/argsort.cu #define SORT_ORDER_ASC 1 #define SORT_ORDER_DESC 0 #include "cuda_utils.cuh" #include<stdint.h> template<typename T> static inline __device__ void ggml_cuda_swap(T & a, T & b) { T tmp = a; a = b; b = tmp; } template<int order, typename T> static __device__ void k_argsort(const T * x, uint32_t * dst, const int ncols, int ncols_pad) { // bitonic sort int col = threadIdx.x; int row = blockIdx.y; if (col >= ncols_pad) { return; } const T * x_row = x + row * ncols; extern __shared__ int dst_row[]; // initialize indices dst_row[col] = col; __syncthreads(); for (int k = 2; k <= ncols_pad; k *= 2) { for (int j = k / 2; j > 0; j /= 2) { int ixj = col ^ j; if (ixj > col) { if ((col & k) == 0) { if (dst_row[col] >= ncols || (dst_row[ixj] < ncols && (order == SORT_ORDER_ASC ? x_row[dst_row[col]] > x_row[dst_row[ixj]] : x_row[dst_row[col]] < x_row[dst_row[ixj]])) ) { ggml_cuda_swap(dst_row[col], dst_row[ixj]); } } else { if (dst_row[ixj] >= ncols || (dst_row[col] < ncols && (order == SORT_ORDER_ASC ? x_row[dst_row[col]] < x_row[dst_row[ixj]] : x_row[dst_row[col]] > x_row[dst_row[ixj]])) ) { ggml_cuda_swap(dst_row[col], dst_row[ixj]); } } } __syncthreads(); } } // copy the result to dst without the padding if (col < ncols) { dst[row * ncols + col] = dst_row[col]; } } #define ASORT_OP(TYPENAME, RUST_NAME) \ extern "C" __global__ void asort_asc_##RUST_NAME( \ const TYPENAME * x, uint32_t * dst, const int ncols, int ncols_pad \ ) { \ k_argsort<SORT_ORDER_ASC>(x, dst, ncols, ncols_pad); \ } \ extern "C" __global__ void asort_desc_##RUST_NAME( \ const TYPENAME * x, uint32_t * dst, const int ncols, int ncols_pad \ ) { \ k_argsort<SORT_ORDER_DESC>(x, dst, ncols, ncols_pad); \ } \ #if __CUDA_ARCH__ >= 800 ASORT_OP(__nv_bfloat16, bf16) #endif #if __CUDA_ARCH__ >= 530 ASORT_OP(__half, f16) #endif ASORT_OP(float, f32) ASORT_OP(double, f64) ASORT_OP(uint8_t, u8) ASORT_OP(uint32_t, u32) ASORT_OP(int64_t, i64)
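// Note on the kernel above: the bitonic network requires ncols_pad to be a
// power of two >= ncols; the padded lanes hold out-of-range indices that the
// comparisons always push past the valid ones, and the final guarded copy
// drops them so only the first ncols sorted indices are written out.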
candle/candle-kernels/src/sort.cu/0
{ "file_path": "candle/candle-kernels/src/sort.cu", "repo_id": "candle", "token_count": 1469 }
32
#include <metal_stdlib> using namespace metal; METAL_FUNC uint get_strided_index( uint idx, constant size_t &num_dims, constant size_t *dims, constant size_t *strides ) { uint strided_i = 0; for (uint d = 0; d < num_dims; d++) { uint dim_idx = num_dims - 1 - d; strided_i += (idx % dims[dim_idx]) * strides[dim_idx]; idx /= dims[dim_idx]; } return strided_i; } template<typename T, typename ID> METAL_FUNC void where_cond( constant size_t &numel, constant size_t &num_dims, constant size_t *dims, constant size_t *strides, constant size_t *strides_t, constant size_t *strides_f, device const ID *ids, device const T *t, device const T *f, device T *out, uint i [[ thread_position_in_grid ]] ) { if (i >= numel){ return; } uint strided_i = get_strided_index(i, num_dims, dims, strides); uint strided_i_t = get_strided_index(i, num_dims, dims, strides_t); uint strided_i_f = get_strided_index(i, num_dims, dims, strides_f); out[i] = ids[strided_i] ? t[strided_i_t] : f[strided_i_f]; } #define WHERE_OP(T, ID, FN_NAME) \ kernel void FN_NAME( \ constant size_t &numel, \ constant size_t &num_dims, \ constant size_t *dims, \ constant size_t *strides, \ constant size_t *strides_t, \ constant size_t *strides_f, \ device const ID *ids, \ device const T *t, \ device const T *f, \ device T *out, \ uint i [[ thread_position_in_grid ]] \ ) { \ where_cond<T, ID>(numel, num_dims, dims, strides, strides_t, strides_f, ids, t, f, out, i); \ } \ WHERE_OP(half, uint32_t, where_u32_f16) WHERE_OP(float, uint32_t, where_u32_f32) WHERE_OP(uint8_t, uint32_t, where_u32_u8) WHERE_OP(uint32_t, uint32_t, where_u32_u32) WHERE_OP(half, uint8_t, where_u8_f16) WHERE_OP(float, uint8_t, where_u8_f32) WHERE_OP(uint8_t, uint8_t, where_u8_u8) WHERE_OP(uint32_t, uint8_t, where_u8_u32) #if __METAL_VERSION__ >= 220 WHERE_OP(int64_t, uint8_t, where_u8_i64) WHERE_OP(int64_t, uint32_t, where_u32_i64) WHERE_OP(half, int64_t, where_i64_f16) WHERE_OP(float, int64_t, where_i64_f32) WHERE_OP(uint8_t, int64_t, where_i64_u8) WHERE_OP(uint32_t, int64_t, where_i64_u32) WHERE_OP(int64_t, int64_t, where_i64_i64) #if defined(__HAVE_BFLOAT__) WHERE_OP(bfloat, int64_t, where_i64_bf16) #endif #endif #if defined(__HAVE_BFLOAT__) WHERE_OP(bfloat, uint8_t, where_u8_bf16) WHERE_OP(bfloat, uint32_t, where_u32_bf16) #endif
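// Note on get_strided_index above: it decodes a linear output index into
// per-dimension coordinates (innermost dimension first) and re-encodes them
// with an input's strides, so ids, t and f may each use an arbitrary
// (transposed or broadcast) layout while out is written contiguously.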
candle/candle-metal-kernels/src/ternary.metal/0
{ "file_path": "candle/candle-metal-kernels/src/ternary.metal", "repo_id": "candle", "token_count": 2256 }
33
use candle::{Result, Tensor};
use serde::Deserialize;

#[derive(Debug, Clone, Copy, PartialEq, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum Activation {
    #[default]
    #[serde(alias = "gelu")]
    Gelu,
    #[serde(alias = "gelu_new")]
    NewGelu,
    Relu,
    Relu2,
    Relu6,
    Silu,
    Sigmoid,
    HardSigmoid,
    Swiglu,
    Swish,
    HardSwish,
    Elu(f64),
    LeakyRelu(f64),
    #[serde(alias = "gelu_pytorch_tanh")]
    GeluPytorchTanh,
}

impl super::Module for Activation {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        match self {
            Self::Gelu => xs.gelu_erf(),
            // https://github.com/huggingface/transformers/blob/12f043eaeaabfef6f6efea411d98e6f6d3c094b7/src/transformers/activations.py#L49-L78
            Self::NewGelu => xs.gelu(),
            Self::Relu => xs.relu(),
            Self::Relu2 => xs.relu()?.sqr(),
            Self::Relu6 => xs.clamp(0f32, 6f32),
            Self::Silu => xs.silu(),
            Self::Sigmoid => crate::ops::sigmoid(xs),
            Self::HardSigmoid => crate::ops::hard_sigmoid(xs),
            Self::Swiglu => crate::ops::swiglu(xs),
            Self::Swish => xs * crate::ops::sigmoid(xs)?,
            Self::HardSwish => xs * crate::ops::hard_sigmoid(xs)?,
            &Self::Elu(alpha) => xs.elu(alpha),
            &Self::LeakyRelu(negative_slope) => crate::ops::leaky_relu(xs, negative_slope),
            Self::GeluPytorchTanh => xs.gelu(),
        }
    }
}

#[derive(Clone, Debug)]
pub struct PReLU {
    weight: Tensor,
    is_scalar: bool,
}

impl PReLU {
    pub fn new(weight: Tensor, is_scalar: bool) -> Self {
        Self { weight, is_scalar }
    }

    pub fn weight(&self) -> &Tensor {
        &self.weight
    }

    pub fn is_scalar(&self) -> bool {
        self.is_scalar
    }
}

impl candle::Module for PReLU {
    fn forward(&self, xs: &Tensor) -> Result<Tensor> {
        let weight = if self.is_scalar {
            self.weight.reshape(())?
        } else if xs.rank() >= 2 {
            let num_channels = xs.dim(1)?;
            let num_weights = self.weight.elem_count();
            if num_weights != num_channels {
                candle::bail!("error in prelu: unexpected number of channels for the input, got {num_channels}, weight dim is {num_weights}")
            }
            let mut s = vec![1; xs.rank()];
            s[1] = self.weight.elem_count();
            self.weight.reshape(s)?
        } else {
            self.weight.clone()
        };
        let zeros = xs.zeros_like()?;
        xs.maximum(&zeros)? + xs.minimum(&zeros)?.broadcast_mul(&weight)?
    }
}

/// Create or initialize a new PReLU layer.
///
/// This uses the default name for the weights, namely `"weight"`.
/// # Arguments
///
/// * `num_channels` - The number of channels. Use `None` to have a single trainable value
///   and `Some` for a 1D vector with the appropriate number of channels. When applying the
///   `forward` function, the input tensor shape `s` should either be one dimension with
///   this number of channels or, if `s.len() >= 2`, have `s[1]` equal to this number.
pub fn prelu(num_channels: Option<usize>, vs: crate::VarBuilder) -> Result<PReLU> {
    let init_ws = crate::init::Init::Const(0.25);
    // When using a scalar weight, the PyTorch encoding is to use a 1d vector of length 1.
    let ws = vs.get_with_hints((num_channels.unwrap_or(1),), "weight", init_ws)?;
    Ok(PReLU::new(ws, num_channels.is_none()))
}
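// Note on the PReLU forward above: it computes
//   prelu(x) = max(0, x) + weight * min(0, x)
// with the weight broadcast along dim 1 (the channel dimension) unless the
// layer was built with a scalar weight.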
candle/candle-nn/src/activation.rs/0
{ "file_path": "candle/candle-nn/src/activation.rs", "repo_id": "candle", "token_count": 1656 }
34
use candle::{CpuStorage, Layout, Result, Shape, Tensor, D}; use rayon::prelude::*; /// Interleaved variant of rotary embeddings. /// The x0 and x1 value are interleaved on the n_embd (= head_dim) dimension. /// The resulting y0 and y1 are also interleaved with: /// y0 = x0*cos - x1*sin /// y1 = x0*sin + x1*cos #[derive(Debug, Clone)] struct RotaryEmbI; impl candle::CustomOp3 for RotaryEmbI { fn name(&self) -> &'static str { "rotary-emb-int" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, h, t, d) = l_src.shape().dims4()?; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * d) .zip(dst.par_chunks_mut(t * d)) .for_each(|(src, dst)| { for i_over_2 in 0..t * d / 2 { let i = 2 * i_over_2; dst[i] = src[i] * cos[i_over_2] - src[i + 1] * sin[i_over_2]; dst[i + 1] = src[i] * sin[i_over_2] + src[i + 1] * cos[i_over_2]; } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, h, t, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: &CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, h, t, d) = l_src.shape().dims4()?; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope_i"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = (&src, &cos, &sin, &dst, (b * h) as u32, (t * d) as u32); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?;
            Ok(dst)
        }

        use candle::backend::BackendStorage;
        use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64};
        let dev = s1.device();
        let slice = match (&s1.slice, &s2.slice, &s3.slice) {
            (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?),
            (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?),
            (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?),
            (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?),
            _ => candle::bail!(
                "unsupported dtype for rope {:?} {:?} {:?}",
                s1.dtype(),
                s2.dtype(),
                s3.dtype()
            ),
        };
        let dst = candle::cuda_backend::CudaStorage {
            slice,
            device: dev.clone(),
        };
        Ok((dst, l1.shape().clone()))
    }

    #[cfg(feature = "metal")]
    fn metal_fwd(
        &self,
        src: &candle::MetalStorage,
        l_src: &Layout,
        cos: &candle::MetalStorage,
        l_cos: &Layout,
        sin: &candle::MetalStorage,
        l_sin: &Layout,
    ) -> Result<(candle::MetalStorage, Shape)> {
        use candle::backend::BackendStorage;
        let device = src.device();
        let command_buffer = device.command_buffer()?;
        let kernels = device.kernels();
        if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() {
            candle::bail!(
                "dtype mismatch in rope-i {:?} {:?} {:?}",
                src.dtype(),
                cos.dtype(),
                sin.dtype()
            )
        }
        let name = match src.dtype() {
            candle::DType::F32 => "rope_i_f32",
            candle::DType::F16 => "rope_i_f16",
            candle::DType::BF16 => "rope_i_bf16",
            dtype => candle::bail!("rope-i is not implemented for {dtype:?}"),
        };
        let (b, h, t, d) = l_src.shape().dims4()?;
        let el = b * h * t * d;
        let output = device.new_buffer(el, src.dtype(), "rope-i")?;
        candle_metal_kernels::call_rope_i(
            device.metal_device(),
            &command_buffer,
            kernels,
            name,
            b * h,
            t * d,
            src.buffer(),
            l_src.start_offset() * src.dtype().size_in_bytes(),
            cos.buffer(),
            l_cos.start_offset() * cos.dtype().size_in_bytes(),
            sin.buffer(),
            l_sin.start_offset() * sin.dtype().size_in_bytes(),
            &output,
        )
        .map_err(candle::Error::wrap)?;
        let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype());
        Ok((out, l_src.shape().clone()))
    }
}

pub fn rope_i(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> {
    let (_b_sz, _n_head, seq_len, n_embd) = xs.dims4()?;
    let (cos_seq_len, cos_n_embd) = cos.dims2()?;
    let (sin_seq_len, sin_n_embd) = sin.dims2()?;
    if cos_n_embd * 2 != n_embd
        || sin_n_embd * 2 != n_embd
        || seq_len > cos_seq_len
        || seq_len > sin_seq_len
    {
        candle::bail!(
            "inconsistent last dim size in rope {:?} {:?} {:?}",
            xs.shape(),
            cos.shape(),
            sin.shape()
        )
    }
    if !xs.is_contiguous() {
        candle::bail!("xs has to be contiguous in rope")
    }
    if !cos.is_contiguous() {
        candle::bail!("cos has to be contiguous in rope")
    }
    if !sin.is_contiguous() {
        candle::bail!("sin has to be contiguous in rope")
    }
    xs.apply_op3_no_bwd(cos, sin, &RotaryEmbI)
}

pub fn rope_i_slow(x: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> {
    let (b_sz, n_head, seq_len, n_embd) = x.dims4()?;
    let cos = cos
        .narrow(0, 0, seq_len)?
        .reshape((seq_len, n_embd / 2, 1))?;
    let sin = sin
        .narrow(0, 0, seq_len)?
        .reshape((seq_len, n_embd / 2, 1))?;
    let cos = cos.broadcast_as((b_sz, 1, seq_len, n_embd / 2, 1))?;
    let sin = sin.broadcast_as((b_sz, 1, seq_len, n_embd / 2, 1))?;
    let x = x.reshape((b_sz, n_head, seq_len, n_embd / 2, 2))?;
    let x0 = x.narrow(D::Minus1, 0, 1)?;
    let x1 = x.narrow(D::Minus1, 1, 1)?;
    let y0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?;
    let y1 = (x0.broadcast_mul(&sin)?
+ x1.broadcast_mul(&cos)?)?; let rope = Tensor::cat(&[y0, y1], D::Minus1)?; let rope = rope.flatten_from(D::Minus2)?; Ok(rope) } /// Contiguous variant of rope embeddings. #[derive(Debug, Clone)] struct RotaryEmb; impl candle::CustomOp3 for RotaryEmb { fn name(&self) -> &'static str { "rotary-emb" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, h, t, d) = l_src.shape().dims4()?; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * d) .zip(dst.par_chunks_mut(t * d)) .for_each(|(src, dst)| { for i_t in 0..t { for i_d in 0..d / 2 { let i1 = i_t * d + i_d; let i2 = i1 + d / 2; let i_cs = i_t * (d / 2) + i_d; dst[i1] = src[i1] * cos[i_cs] - src[i2] * sin[i_cs]; dst[i2] = src[i1] * sin[i_cs] + src[i2] * cos[i_cs]; } } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, h, t, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: &CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, h, t, d) = l_src.shape().dims4()?; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( &src, &cos, &sin, &dst, (b * h) as u32, (t * d) as u32, d as u32, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } use candle::backend::BackendStorage; use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64}; let dev = s1.device(); let slice = match (&s1.slice, &s2.slice, &s3.slice) { (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?), (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), }; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, src: &candle::MetalStorage, l_src: &Layout, cos: &candle::MetalStorage, l_cos: &Layout, sin: &candle::MetalStorage, l_sin: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = src.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() { candle::bail!( "dtype mismatch in rope {:?} {:?} {:?}", src.dtype(), cos.dtype(), sin.dtype() ) } let name = match src.dtype() { candle::DType::F32 => "rope_f32", candle::DType::F16 => "rope_f16", candle::DType::BF16 => "rope_bf16", dtype => candle::bail!("rope is not implemented for {dtype:?}"), }; let (b, h, t, d) = l_src.shape().dims4()?; let el = b * h * t * d; let output = device.new_buffer(el, src.dtype(), "rope-i")?; candle_metal_kernels::call_rope( device.metal_device(), &command_buffer, kernels, name, b * h, t * d, d, src.buffer(), l_src.start_offset() * src.dtype().size_in_bytes(), cos.buffer(), l_cos.start_offset() * cos.dtype().size_in_bytes(), sin.buffer(), l_sin.start_offset() * sin.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype()); Ok((out, l_src.shape().clone())) } } pub fn rope(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (_b_sz, _n_head, seq_len, n_embd) = xs.dims4()?; let (cos_seq_len, cos_n_embd) = cos.dims2()?; let (sin_seq_len, sin_n_embd) = sin.dims2()?; if cos_n_embd * 2 != n_embd || sin_n_embd * 2 != n_embd || seq_len > cos_seq_len || seq_len > sin_seq_len { candle::bail!( "inconsistent last dim size in rope {:?} {:?} {:?}", xs.shape(), cos.shape(), sin.shape() ) } if !xs.is_contiguous() { candle::bail!("xs has to be contiguous in rope") } if !cos.is_contiguous() { candle::bail!("cos has to be contiguous in rope") } if !sin.is_contiguous() { candle::bail!("sin has to be contiguous in rope") } xs.apply_op3_no_bwd(cos, sin, &RotaryEmb) } fn rotate_half(xs: &Tensor) -> Result<Tensor> { let last_dim = xs.dim(D::Minus1)?; let xs1 = xs.narrow(D::Minus1, 0, last_dim / 2)?; let xs2 = xs.narrow(D::Minus1, last_dim / 2, last_dim - last_dim / 2)?; Tensor::cat(&[&xs2.neg()?, &xs1], D::Minus1) } pub fn rope_slow(x: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (_b_sz, _h, seq_len, _n_embd) = x.dims4()?; let cos = Tensor::cat(&[cos, cos], D::Minus1)?; let sin = Tensor::cat(&[sin, sin], D::Minus1)?; let cos = cos.narrow(0, 0, seq_len)?; let sin = sin.narrow(0, 0, seq_len)?; let cos = cos.unsqueeze(0)?.unsqueeze(0)?; let sin = sin.unsqueeze(0)?.unsqueeze(0)?; x.broadcast_mul(&cos)? + rotate_half(x)?.broadcast_mul(&sin)? } /// T (seqlen)/H (num-heads)/D (head-dim) contiguous variant of rope embeddings. 
#[derive(Debug, Clone)] struct RotaryEmbThd; impl candle::CustomOp3 for RotaryEmbThd { fn name(&self) -> &'static str { "rotary-emb" } fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)> { fn inner<T: candle::WithDType + num_traits::Float>( src: &[T], l_src: &Layout, cos: &[T], l_cos: &Layout, sin: &[T], l_sin: &Layout, ) -> Result<(CpuStorage, Shape)> { let src = match l_src.contiguous_offsets() { None => candle::bail!("input src has to be contiguous"), Some((o1, o2)) => &src[o1..o2], }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("input cos has to be contiguous"), Some((o1, o2)) => &cos[o1..o2], }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("input sin has to be contiguous"), Some((o1, o2)) => &sin[o1..o2], }; let (b, t, h, d) = l_src.shape().dims4()?; let el_count = b * h * t * d; let mut dst = vec![T::zero(); el_count]; src.par_chunks(t * h * d) .zip(dst.par_chunks_mut(t * h * d)) .for_each(|(src, dst)| { for i_t in 0..t { for i_d in 0..d / 2 { let i_cs = i_t * (d / 2) + i_d; for i_h in 0..h { let i1 = i_t * h * d + i_h * d + i_d; let i2 = i1 + d / 2; dst[i1] = src[i1] * cos[i_cs] - src[i2] * sin[i_cs]; dst[i2] = src[i1] * sin[i_cs] + src[i2] * cos[i_cs]; } } } }); let storage = candle::WithDType::to_cpu_storage_owned(dst); Ok((storage, (b, t, h, d).into())) } use candle::backend::BackendStorage; use CpuStorage::{BF16, F16, F32, F64}; match (s1, s2, s3) { (BF16(s1), BF16(s2), BF16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F16(s1), F16(s2), F16(s3)) => inner(s1, l1, s2, l2, s3, l3), (F32(s1), F32(s2), F32(s3)) => inner(s1, l1, s2, l2, s3, l3), (F64(s1), F64(s2), F64(s3)) => inner(s1, l1, s2, l2, s3, l3), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), } } #[cfg(feature = "cuda")] fn cuda_fwd( &self, s1: &candle::CudaStorage, l1: &Layout, s2: &candle::CudaStorage, l2: &Layout, s3: &candle::CudaStorage, l3: &Layout, ) -> Result<(candle::CudaStorage, Shape)> { use candle::cuda_backend::cudarc::driver::{ CudaSlice, DeviceRepr, LaunchAsync, LaunchConfig, }; use candle::cuda_backend::{kernel_name, kernels, WrapErr}; use candle::{CudaDevice, WithDType}; fn inner<T: DeviceRepr + WithDType>( src: &CudaSlice<T>, l_src: &Layout, cos: &CudaSlice<T>, l_cos: &Layout, sin: &CudaSlice<T>, l_sin: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let src = match l_src.contiguous_offsets() { None => candle::bail!("src input has to be contiguous"), Some((o1, o2)) => src.slice(o1..o2), }; let cos = match l_cos.contiguous_offsets() { None => candle::bail!("cos input has to be contiguous"), Some((o1, o2)) => cos.slice(o1..o2), }; let sin = match l_sin.contiguous_offsets() { None => candle::bail!("sin input has to be contiguous"), Some((o1, o2)) => sin.slice(o1..o2), }; let (b, t, h, d) = l_src.shape().dims4()?; let el = b * h * t * d; let cfg = LaunchConfig::for_num_elems((el / 2) as u32); let func = dev.get_or_load_func(&kernel_name::<T>("rope_thd"), kernels::REDUCE)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( &src, &cos, &sin, &dst, b as u32, t as u32, h as u32, d as u32, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } use candle::backend::BackendStorage; use candle::cuda_backend::CudaStorageSlice::{BF16, F16, F32, F64}; let dev = s1.device(); let slice = match (&s1.slice, &s2.slice, &s3.slice) { (BF16(s1), BF16(s2), BF16(s3)) => BF16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F16(s1), F16(s2), F16(s3)) => F16(inner(s1, l1, s2, l2, s3, l3, dev)?), (F32(s1), F32(s2), F32(s3)) => F32(inner(s1, l1, s2, l2, s3, l3, dev)?), (F64(s1), F64(s2), F64(s3)) => F64(inner(s1, l1, s2, l2, s3, l3, dev)?), _ => candle::bail!( "unsupported dtype for rope {:?} {:?} {:?}", s1.dtype(), s2.dtype(), s3.dtype() ), }; let dst = candle::cuda_backend::CudaStorage { slice, device: dev.clone(), }; Ok((dst, l1.shape().clone())) } #[cfg(feature = "metal")] fn metal_fwd( &self, src: &candle::MetalStorage, l_src: &Layout, cos: &candle::MetalStorage, l_cos: &Layout, sin: &candle::MetalStorage, l_sin: &Layout, ) -> Result<(candle::MetalStorage, Shape)> { use candle::backend::BackendStorage; let device = src.device(); let command_buffer = device.command_buffer()?; let kernels = device.kernels(); if cos.dtype() != src.dtype() || sin.dtype() != src.dtype() { candle::bail!( "dtype mismatch in rope {:?} {:?} {:?}", src.dtype(), cos.dtype(), sin.dtype() ) } let name = match src.dtype() { candle::DType::F32 => "rope_thd_f32", candle::DType::F16 => "rope_thd_f16", candle::DType::BF16 => "rope_thd_bf16", dtype => candle::bail!("rope_thd is not implemented for {dtype:?}"), }; let (b, t, h, d) = l_src.shape().dims4()?; let el = b * h * t * d; let output = device.new_buffer(el, src.dtype(), "rope-thd")?; candle_metal_kernels::call_rope_thd( device.metal_device(), &command_buffer, kernels, name, b, t, h, d, src.buffer(), l_src.start_offset() * src.dtype().size_in_bytes(), cos.buffer(), l_cos.start_offset() * cos.dtype().size_in_bytes(), sin.buffer(), l_sin.start_offset() * sin.dtype().size_in_bytes(), &output, ) .map_err(candle::Error::wrap)?; let out = candle::MetalStorage::new(output, device.clone(), el, src.dtype()); Ok((out, l_src.shape().clone())) } } pub fn rope_thd(xs: &Tensor, cos: &Tensor, sin: &Tensor) -> Result<Tensor> { let (_b_sz, seq_len, _n_head, n_embd) = xs.dims4()?; let (cos_seq_len, cos_n_embd) = cos.dims2()?; let (sin_seq_len, sin_n_embd) = sin.dims2()?; if cos_n_embd * 2 != n_embd || sin_n_embd * 2 != n_embd || seq_len > cos_seq_len || seq_len > sin_seq_len { candle::bail!( "inconsistent last dim size in rope {:?} {:?} {:?}", xs.shape(), cos.shape(), sin.shape() ) } if !xs.is_contiguous() { candle::bail!("xs has to be contiguous in rope") } if !cos.is_contiguous() { candle::bail!("cos has to be contiguous in rope") } if !sin.is_contiguous() { candle::bail!("sin has to be contiguous in rope") } xs.apply_op3_no_bwd(cos, sin, &RotaryEmbThd) }
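// ---------------------------------------------------------------------------
// Editor's note: the tests below are an added sketch, not part of the original
// file. They check the property stated in the doc comments above: the fast
// custom CPU ops must agree with the straightforward tensor-op reference
// versions. Shapes follow this module's conventions: xs is
// (batch, num_heads, seq_len, head_dim) and cos/sin are (seq_len, head_dim / 2).
#[cfg(test)]
mod rope_consistency_tests {
    use super::*;
    use candle::Device;

    // Largest absolute element-wise difference between two tensors.
    fn max_abs_diff(a: &Tensor, b: &Tensor) -> Result<f32> {
        (a - b)?.abs()?.flatten_all()?.max(0)?.to_scalar::<f32>()
    }

    #[test]
    fn fast_paths_match_slow_paths() -> Result<()> {
        let dev = Device::Cpu;
        let (b, h, t, d) = (2, 3, 5, 8);
        let xs = Tensor::randn(0f32, 1f32, (b, h, t, d), &dev)?;
        let cos = Tensor::randn(0f32, 1f32, (t, d / 2), &dev)?;
        let sin = Tensor::randn(0f32, 1f32, (t, d / 2), &dev)?;
        // Interleaved variant.
        let diff = max_abs_diff(&rope_i(&xs, &cos, &sin)?, &rope_i_slow(&xs, &cos, &sin)?)?;
        assert!(diff < 1e-5, "rope_i mismatch: {diff}");
        // Contiguous variant.
        let diff = max_abs_diff(&rope(&xs, &cos, &sin)?, &rope_slow(&xs, &cos, &sin)?)?;
        assert!(diff < 1e-5, "rope mismatch: {diff}");
        Ok(())
    }
}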
candle/candle-nn/src/rotary_emb.rs/0
{ "file_path": "candle/candle-nn/src/rotary_emb.rs", "repo_id": "candle", "token_count": 15510 }
35
use candle::Result; use prost::Message; pub mod onnx { include!(concat!(env!("OUT_DIR"), "/onnx.rs")); } pub mod eval; pub use eval::{dtype, simple_eval}; pub fn read_file<P: AsRef<std::path::Path>>(p: P) -> Result<onnx::ModelProto> { let buf = std::fs::read(p)?; onnx::ModelProto::decode(buf.as_slice()).map_err(candle::Error::wrap) }
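// ---------------------------------------------------------------------------
// Editor's note: the function below is an added usage sketch, not part of the
// original file. The file name "model.onnx" and the input name "input" are
// hypothetical placeholders; real graphs declare their input names in
// `graph.input` of the decoded `ModelProto`.
#[allow(dead_code)]
fn example_eval() -> Result<()> {
    use candle::{DType, Device, Tensor};
    use std::collections::HashMap;

    // Decode the protobuf file into a ModelProto, then evaluate it on a dummy input.
    let model = read_file("model.onnx")?;
    let input = Tensor::zeros((1, 3, 224, 224), DType::F32, &Device::Cpu)?;
    let mut inputs = HashMap::new();
    inputs.insert("input".to_string(), input);
    let outputs = simple_eval(&model, inputs)?;
    for (name, tensor) in outputs.iter() {
        println!("{name}: {:?}", tensor.shape());
    }
    Ok(())
}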
candle/candle-onnx/src/lib.rs/0
{ "file_path": "candle/candle-onnx/src/lib.rs", "repo_id": "candle", "token_count": 154 }
36
from .module import Module from .container import Sequential, ModuleList, ModuleDict from .sparse import Embedding from .normalization import LayerNorm from .linear import Linear
candle/candle-pyo3/py_src/candle/nn/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/nn/__init__.py", "repo_id": "candle", "token_count": 43 }
37
use std::collections::HashMap; use crate::utils::wrap_err; use crate::{PyDType, PyTensor}; use candle_onnx::eval::{dtype, get_tensor, simple_eval}; use candle_onnx::onnx::tensor_proto::DataType; use candle_onnx::onnx::tensor_shape_proto::dimension::Value; use candle_onnx::onnx::type_proto::{Tensor as ONNXTensor, Value as ONNXValue}; use candle_onnx::onnx::{ModelProto, ValueInfoProto}; use pyo3::exceptions::PyValueError; use pyo3::prelude::*; use pyo3::types::{PyList, PyTuple}; #[derive(Clone, Debug)] #[pyclass(name = "ONNXTensorDescription")] /// A wrapper around an ONNX tensor description. pub struct PyONNXTensorDescriptor(ONNXTensor); #[pymethods] impl PyONNXTensorDescriptor { #[getter] /// The data type of the tensor. /// &RETURNS&: DType fn dtype(&self) -> PyResult<PyDType> { match DataType::try_from(self.0.elem_type) { Ok(dt) => match dtype(dt) { Some(dt) => Ok(PyDType(dt)), None => Err(PyValueError::new_err(format!( "unsupported 'value' data-type {dt:?}" ))), }, type_ => Err(PyValueError::new_err(format!( "unsupported input type {type_:?}" ))), } } #[getter] /// The shape of the tensor. /// &RETURNS&: Tuple[Union[int,str,Any]] fn shape(&self, py: Python) -> PyResult<Py<PyTuple>> { let shape = PyList::empty_bound(py); if let Some(d) = &self.0.shape { for dim in d.dim.iter() { if let Some(value) = &dim.value { match value { Value::DimValue(v) => shape.append(*v)?, Value::DimParam(s) => shape.append(s.clone())?, }; } else { return Err(PyValueError::new_err("None value in shape")); } } } Ok(shape.to_tuple().into()) } fn __repr__(&self, py: Python) -> String { match (self.shape(py), self.dtype()) { (Ok(shape), Ok(dtype)) => format!( "TensorDescriptor[shape: {:?}, dtype: {:?}]", shape.to_string(), dtype.__str__() ), (Err(_), Err(_)) => "TensorDescriptor[shape: unknown, dtype: unknown]".to_string(), (Err(_), Ok(dtype)) => format!( "TensorDescriptor[shape: unknown, dtype: {:?}]", dtype.__str__() ), (Ok(shape), Err(_)) => format!( "TensorDescriptor[shape: {:?}, dtype: unknown]", shape.to_string() ), } } fn __str__(&self, py: Python) -> String { self.__repr__(py) } } #[derive(Clone, Debug)] #[pyclass(name = "ONNXModel")] /// A wrapper around an ONNX model. pub struct PyONNXModel(ModelProto); fn extract_tensor_descriptions( value_infos: &[ValueInfoProto], ) -> HashMap<String, PyONNXTensorDescriptor> { let mut map = HashMap::new(); for value_info in value_infos.iter() { let input_type = match &value_info.r#type { Some(input_type) => input_type, None => continue, }; let input_type = match &input_type.value { Some(input_type) => input_type, None => continue, }; let tensor_type: &ONNXTensor = match input_type { ONNXValue::TensorType(tt) => tt, _ => continue, }; map.insert( value_info.name.to_string(), PyONNXTensorDescriptor(tensor_type.clone()), ); } map } #[pymethods] impl PyONNXModel { #[new] #[pyo3(text_signature = "(self, path:str)")] /// Load an ONNX model from the given path. fn new(path: String) -> PyResult<Self> { let model: ModelProto = candle_onnx::read_file(path).map_err(wrap_err)?; Ok(PyONNXModel(model)) } #[getter] /// The version of the IR this model targets. /// &RETURNS&: int fn ir_version(&self) -> i64 { self.0.ir_version } #[getter] /// The producer of the model. /// &RETURNS&: str fn producer_name(&self) -> String { self.0.producer_name.clone() } #[getter] /// The version of the producer of the model. /// &RETURNS&: str fn producer_version(&self) -> String { self.0.producer_version.clone() } #[getter] /// The domain of the operator set of the model. 
/// &RETURNS&: str fn domain(&self) -> String { self.0.domain.clone() } #[getter] /// The version of the model. /// &RETURNS&: int fn model_version(&self) -> i64 { self.0.model_version } #[getter] /// The doc string of the model. /// &RETURNS&: str fn doc_string(&self) -> String { self.0.doc_string.clone() } /// Get the weights of the model. /// &RETURNS&: Dict[str, Tensor] fn initializers(&self) -> PyResult<HashMap<String, PyTensor>> { let mut map = HashMap::new(); if let Some(graph) = self.0.graph.as_ref() { for tensor_description in graph.initializer.iter() { let tensor = get_tensor(tensor_description, tensor_description.name.as_str()) .map_err(wrap_err)?; map.insert(tensor_description.name.to_string(), PyTensor(tensor)); } } Ok(map) } #[getter] /// The inputs of the model. /// &RETURNS&: Optional[Dict[str, ONNXTensorDescription]] fn inputs(&self) -> Option<HashMap<String, PyONNXTensorDescriptor>> { if let Some(graph) = self.0.graph.as_ref() { return Some(extract_tensor_descriptions(&graph.input)); } None } #[getter] /// The outputs of the model. /// &RETURNS&: Optional[Dict[str, ONNXTensorDescription]] fn outputs(&self) -> Option<HashMap<String, PyONNXTensorDescriptor>> { if let Some(graph) = self.0.graph.as_ref() { return Some(extract_tensor_descriptions(&graph.output)); } None } #[pyo3(text_signature = "(self, inputs:Dict[str,Tensor])")] /// Run the model on the given inputs. /// &RETURNS&: Dict[str,Tensor] fn run(&self, inputs: HashMap<String, PyTensor>) -> PyResult<HashMap<String, PyTensor>> { let unwrapped_tensors = inputs.into_iter().map(|(k, v)| (k.clone(), v.0)).collect(); let result = simple_eval(&self.0, unwrapped_tensors).map_err(wrap_err)?; Ok(result .into_iter() .map(|(k, v)| (k.clone(), PyTensor(v))) .collect()) } }
candle/candle-pyo3/src/onnx.rs/0
{ "file_path": "candle/candle-pyo3/src/onnx.rs", "repo_id": "candle", "token_count": 3268 }
38
pub mod generation; pub mod models; pub mod object_detection; pub mod pipelines; pub mod quantized_nn; pub mod quantized_var_builder; pub mod utils;
candle/candle-transformers/src/lib.rs/0
{ "file_path": "candle/candle-transformers/src/lib.rs", "repo_id": "candle", "token_count": 47 }
39
use candle::{IndexOp, Result, Tensor, D}; use candle_nn::{layer_norm, LayerNorm, Linear, Module, VarBuilder}; const IMG_SIZE: usize = 518; const PATCH_SIZE: usize = 14; const NUM_CLASSES: usize = 1000; fn linear(vb: VarBuilder, in_dim: usize, out_dim: usize, bias: bool) -> Result<Linear> { if bias { candle_nn::linear(in_dim, out_dim, vb) } else { candle_nn::linear_no_bias(in_dim, out_dim, vb) } } #[derive(Debug)] struct Attention { qkv: Linear, proj: Linear, num_heads: usize, scale: f64, } impl Attention { fn new( vb: VarBuilder, dim: usize, num_heads: usize, qkv_bias: bool, proj_bias: bool, ) -> Result<Self> { let qkv = linear(vb.pp("qkv"), dim, dim * 3, qkv_bias)?; let proj = linear(vb.pp("proj"), dim, dim, proj_bias)?; let scale = 1. / ((dim / num_heads) as f64).sqrt(); Ok(Self { qkv, proj, num_heads, scale, }) } } impl Module for Attention { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (b, n, c) = xs.dims3()?; let qkv = self .qkv .forward(xs)? .reshape((b, n, 3, self.num_heads, c / self.num_heads))? .transpose(1, 2)? // 02134 .transpose(0, 1)? // 20134 .transpose(2, 3)?; // 20314 let q = (qkv.i(0)? * self.scale)?; let k = qkv.i(1)?.contiguous()?; let v = qkv.i(2)?.contiguous()?; let attn = candle_nn::ops::softmax(&q.matmul(&k.t()?)?, D::Minus1)?; let attn = attn.matmul(&v)?.transpose(1, 2)?.reshape((b, n, c))?; self.proj.forward(&attn) } } #[derive(Debug)] struct LayerScale { gamma: Tensor, } impl LayerScale { fn new(vb: VarBuilder, dim: usize) -> Result<Self> { let gamma = vb.get(dim, "gamma")?; Ok(Self { gamma }) } } impl Module for LayerScale { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.broadcast_mul(&self.gamma) } } #[derive(Debug)] struct Mlp { fc1: Linear, fc2: Linear, } impl Mlp { fn new(vb: VarBuilder, in_features: usize, hidden_features: usize, bias: bool) -> Result<Self> { let out_features = in_features; let fc1 = linear(vb.pp("fc1"), in_features, hidden_features, bias)?; let fc2 = linear(vb.pp("fc2"), hidden_features, out_features, bias)?; Ok(Self { fc1, fc2 }) } } impl Module for Mlp { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.fc1.forward(xs)?.gelu()?; self.fc2.forward(&xs) } } #[derive(Debug)] struct Block { norm1: LayerNorm, attn: Attention, ls1: LayerScale, norm2: LayerNorm, mlp: Mlp, ls2: LayerScale, } impl Block { fn new(vb: VarBuilder, dim: usize, num_heads: usize) -> Result<Self> { let norm1 = layer_norm(dim, 1e-5, vb.pp("norm1"))?; let attn = Attention::new(vb.pp("attn"), dim, num_heads, true, true)?; let ls1 = LayerScale::new(vb.pp("ls1"), dim)?; let norm2 = layer_norm(dim, 1e-5, vb.pp("norm2"))?; let mlp = Mlp::new(vb.pp("mlp"), dim, dim * 4, true)?; let ls2 = LayerScale::new(vb.pp("ls2"), dim)?; Ok(Self { norm1, attn, ls1, norm2, mlp, ls2, }) } } impl Module for Block { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let residual = xs; let xs = self .ls1 .forward(&self.attn.forward(&self.norm1.forward(xs)?)?)?; let xs = (xs + residual)?; let residual = &xs; let xs = self .ls2 .forward(&self.mlp.forward(&self.norm2.forward(&xs)?)?)?; xs + residual } } #[derive(Debug)] struct PatchEmbed { proj: candle_nn::Conv2d, patch_size: (usize, usize), num_patches: usize, } impl PatchEmbed { fn new( vb: VarBuilder, img_size: usize, patch_size: usize, in_chans: usize, embed_dim: usize, ) -> Result<Self> { let config = candle_nn::Conv2dConfig { stride: patch_size, ..Default::default() }; let proj = candle_nn::conv2d(in_chans, embed_dim, patch_size, config, vb.pp("proj"))?; let num_patches = (img_size / patch_size) * (img_size / 
patch_size); Ok(Self { proj, patch_size: (patch_size, patch_size), num_patches, }) } } impl Module for PatchEmbed { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _c, h, w) = xs.dims4()?; let (patch_h, patch_w) = self.patch_size; if (h % patch_h) != 0 { candle::bail!("image height {h} is not a multiple of patch height {patch_h}") } if (w % patch_w) != 0 { candle::bail!("image width {w} is not a multiple of patch width {patch_w}") } let xs = self.proj.forward(xs)?; let (b, c, h, w) = xs.dims4()?; // flatten embeddings. xs.reshape((b, c, h * w))?.transpose(1, 2) } } #[derive(Debug)] pub struct DinoVisionTransformer { patch_embed: PatchEmbed, cls_token: Tensor, pos_embed: Tensor, blocks: Vec<Block>, norm: LayerNorm, head: Linear, } impl DinoVisionTransformer { pub fn new(vb: VarBuilder, depth: usize, embed_dim: usize, num_heads: usize) -> Result<Self> { let patch_embed = PatchEmbed::new(vb.pp("patch_embed"), IMG_SIZE, PATCH_SIZE, 3, embed_dim)?; let cls_token = vb.get((1, 1, embed_dim), "cls_token")?; let num_tokens = 1; let pos_embed = vb.get( (1, patch_embed.num_patches + num_tokens, embed_dim), "pos_embed", )?; let head = linear(vb.pp("head"), 2 * embed_dim, NUM_CLASSES, true)?; let norm = layer_norm(embed_dim, 1e-5, vb.pp("norm"))?; let vb_b = vb.pp("blocks"); let blocks = (0..depth) .map(|i| Block::new(vb_b.pp(i.to_string()), embed_dim, num_heads)) .collect::<Result<Vec<_>>>()?; Ok(Self { patch_embed, cls_token, pos_embed, blocks, norm, head, }) } fn interpolate_pos_encoding(&self, xs: &Tensor, w: usize, h: usize) -> Result<Tensor> { let npatch = xs.dim(1)? - 1; let n = self.pos_embed.dim(1)? - 1; let sqrt_n = (n as f64).sqrt(); if npatch == n && w == h { return Ok(self.pos_embed.clone()); } let class_pos_embed = self.pos_embed.i((.., ..1))?; let patch_pos_embed = self.pos_embed.i((.., 1..))?; let dim = xs.dim(D::Minus1)?; let (w0, h0) = ((w / PATCH_SIZE) as f64 + 0.1, (h / PATCH_SIZE) as f64 + 0.1); let patch_pos_embed = patch_pos_embed .reshape((1, sqrt_n as usize, sqrt_n as usize, dim))? .transpose(2, 3)? .transpose(1, 2)?; // This uses bicubic interpolation in the original implementation. let patch_pos_embed = patch_pos_embed.upsample_nearest2d(h0 as usize, w0 as usize)?; let el_count = patch_pos_embed.shape().elem_count(); let patch_pos_embed = patch_pos_embed .transpose(1, 2)? .transpose(2, 3)? .reshape((1, el_count / dim, dim))?; Tensor::cat(&[&class_pos_embed, &patch_pos_embed], 1) } fn prepare_tokens_with_mask(&self, xs: &Tensor) -> Result<Tensor> { let (_b, _nc, w, h) = xs.dims4()?; let xs = self.patch_embed.forward(xs)?; let xs = Tensor::cat(&[&self.cls_token, &xs], 1)?; &xs + &self.interpolate_pos_encoding(&xs, w, h)? } fn get_intermediate_layers_not_chunked( &self, xs: &Tensor, blocks_to_take: &[usize], ) -> Result<Vec<Tensor>> { let mut xs = self.prepare_tokens_with_mask(xs)?; let mut output = Vec::new(); for (i, blk) in self.blocks.iter().enumerate() { xs = blk.forward(&xs)?; if blocks_to_take.contains(&i) { output.push(xs.clone()); } } if output.len() != blocks_to_take.len() { candle::bail!( "only {} / {} blocks found", output.len(), blocks_to_take.len() ); } Ok(output) } pub fn get_intermediate_layers( &self, xs: &Tensor, blocks_to_take: &[usize], reshape: bool, return_class_token: bool, norm: bool, ) -> Result<Tensor> { let outputs = self.get_intermediate_layers_not_chunked(xs, blocks_to_take)?; let outputs = if norm { outputs .iter() .map(|out| self.norm.forward(out)) .collect::<Result<Vec<_>>>()? 
} else { outputs }; let class_tokens = outputs .iter() .map(|out| out.i((.., 0))) .collect::<Result<Vec<_>>>()?; let outputs = outputs .iter() .map(|out| out.i((.., 1..))) .collect::<Result<Vec<_>>>()?; let outputs = if reshape { let (b, _c, w, h) = xs.dims4()?; let patch_size = self.patch_embed.patch_size.0; let num_channels = outputs[0].elem_count() / (b * (w / patch_size) * (h / patch_size)); outputs .iter() .map(|out| { out.reshape((b, w / patch_size, h / patch_size, num_channels))? .transpose(2, 3)? .transpose(1, 2) }) .collect::<Result<Vec<_>>>()? } else { outputs }; let outputs = if return_class_token { outputs .iter() .zip(class_tokens.iter()) .map(|(out, class_token)| Tensor::cat(&[out, class_token], D::Minus1)) .collect::<Result<Vec<_>>>()? } else { outputs }; Tensor::stack(&outputs[..], 0) } } impl Module for DinoVisionTransformer { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let mut xs = self.prepare_tokens_with_mask(xs)?; for blk in self.blocks.iter() { xs = blk.forward(&xs)? } let xs = self.norm.forward(&xs)?; let xs_norm_clstoken = xs.i((.., 0))?; let xs_norm_patchtokens = xs.i((.., 1..))?.mean(1)?; let xs = Tensor::cat(&[xs_norm_clstoken, xs_norm_patchtokens], D::Minus1)?; self.head.forward(&xs) } } pub fn vit_small(vb: VarBuilder) -> Result<DinoVisionTransformer> { DinoVisionTransformer::new(vb, 12, 384, 6) }
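// ---------------------------------------------------------------------------
// Editor's note: the test below is an added shape-level sketch, not part of the
// original file. An all-zero VarBuilder stands in for real DINOv2 weights, so
// the logits are meaningless; the point is the expected geometry: one
// 3 x IMG_SIZE x IMG_SIZE image in, one row of NUM_CLASSES logits out.
#[cfg(test)]
mod dinov2_shape_tests {
    use super::*;
    use candle::{DType, Device};

    #[test]
    fn vit_small_output_shape() -> Result<()> {
        let dev = Device::Cpu;
        let vb = VarBuilder::zeros(DType::F32, &dev);
        let model = vit_small(vb)?;
        let img = Tensor::zeros((1, 3, IMG_SIZE, IMG_SIZE), DType::F32, &dev)?;
        let logits = model.forward(&img)?;
        let (b, n) = logits.dims2()?;
        assert_eq!((b, n), (1, NUM_CLASSES));
        Ok(())
    }
}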
candle/candle-transformers/src/models/dinov2.rs/0
{ "file_path": "candle/candle-transformers/src/models/dinov2.rs", "repo_id": "candle", "token_count": 5803 }
40
//! Hiera inference implementation based on timm. //! //! See "Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles" //! https://arxiv.org/abs/2306.00989 //! //! https://github.com/huggingface/pytorch-image-models/blob/main/timm/models/hiera.py use candle::{Result, D}; use candle_nn::{conv2d, layer_norm, linear, ops::softmax, Conv2dConfig, Func, VarBuilder}; #[derive(Debug, Clone, serde::Deserialize)] pub struct Config { channels: usize, heads: usize, stages: [usize; 4], } impl Config { pub fn tiny() -> Self { Self { channels: 96, heads: 1, stages: [1, 2, 7, 2], } } pub fn small() -> Self { Self { channels: 96, heads: 1, stages: [1, 2, 11, 2], } } pub fn base() -> Self { Self { channels: 96, heads: 1, stages: [2, 3, 16, 3], } } pub fn base_plus() -> Self { Self { channels: 112, heads: 2, stages: [2, 3, 16, 3], } } pub fn large() -> Self { Self { channels: 144, heads: 2, stages: [2, 6, 36, 4], } } pub fn huge() -> Self { Self { channels: 256, heads: 4, stages: [2, 6, 36, 4], } } } const NUM_TOKENS: usize = 56 * 56; fn hiera_embeddings(channels: usize, vb: VarBuilder) -> Result<Func<'static>> { let conv_cfg = Conv2dConfig { stride: 4, padding: 3, ..Default::default() }; let proj = conv2d(3, channels, 7, conv_cfg, vb.pp("patch_embed.proj"))?; let pos_embed = vb.get((1, NUM_TOKENS, channels), "pos_embed")?; Ok(Func::new(move |xs| { let xs = xs.apply(&proj)?; let (b, c, _, _) = xs.dims4()?; let xs = xs.reshape((b, c, ()))?.transpose(1, 2)?; let xs = xs.broadcast_add(&pos_embed)?; Ok(xs) })) } fn hiera_unroll() -> Result<Func<'static>> { Ok(Func::new(move |xs| { let mut xs = xs.clone(); let (mut b, _, c) = xs.dims3()?; let mut size = 56; xs = xs.reshape((b, size, size, c))?; for _ in 0..3 { size /= 2; let new_shape = &[b, size, 2, size, 2, c]; xs = xs.reshape(new_shape)?; xs = xs.permute((0, 2, 4, 1, 3, 5))?; xs = xs.flatten(0, 2)?; b *= 4; } xs = xs.reshape(((), NUM_TOKENS, c))?; Ok(xs) })) } fn hiera_mlp(in_channels: usize, out_channels: usize, vb: VarBuilder) -> Result<Func<'static>> { let fc1 = linear(in_channels, out_channels, vb.pp("fc1"))?; let fc2 = linear(out_channels, in_channels, vb.pp("fc2"))?; Ok(Func::new(move |xs| { let xs = xs.apply(&fc1)?.gelu()?.apply(&fc2)?; Ok(xs) })) } fn hiera_attention( in_channels: usize, out_channels: usize, heads: usize, q_stride: usize, window_size: usize, use_mask_attention: bool, vb: VarBuilder, ) -> Result<Func<'static>> { let head_dim = out_channels / heads; let scale = (head_dim as f64).powf(-0.5); let proj = linear(out_channels, out_channels, vb.pp("proj"))?; let qkv = linear(in_channels, out_channels * 3, vb.pp("qkv"))?; Ok(Func::new(move |xs| { let (b, n, _) = xs.dims3()?; let num_windows = if use_mask_attention { n / (q_stride * window_size) } else { 1 }; let qkv = xs.apply(&qkv)?; let ec = qkv.elem_count(); let s = ec / (b * num_windows * 3 * heads * head_dim); let qkv = qkv .reshape((b, s, num_windows, 3, heads, head_dim))? .permute((3, 0, 4, 2, 1, 5))?; let mut q = qkv.get(0)?; let k = qkv.get(1)?; let v = qkv.get(2)?; if q_stride > 1 { let ec = q.elem_count(); let s = ec / (b * num_windows * q_stride * heads * head_dim); q = q .reshape((b, heads, num_windows, q_stride, s, head_dim))? .max(3)?; } let q = (q * scale)?; // Q, K and V are 6 dimensional with the first dimension being 1. // Squeeze them for the attention calculation since 6 dimensional matmuls are not supported. let att = q .squeeze(0)? 
.matmul(&k.squeeze(0)?.transpose(D::Minus2, D::Minus1)?)?; let att = softmax(&att, D::Minus1)?; let xs = att.matmul(&v.squeeze(0)?)?.unsqueeze(0)?; let xs = xs.transpose(1, 3)?.reshape((b, (), out_channels))?; let xs = xs.apply(&proj)?; Ok(xs) })) } fn hiera_block( heads: usize, in_channels: usize, out_channels: usize, q_stride: usize, window_size: usize, use_mask_attention: bool, vb: VarBuilder, ) -> Result<Func<'static>> { let norm1 = layer_norm(in_channels, 1e-6, vb.pp("norm1"))?; let norm2 = layer_norm(out_channels, 1e-6, vb.pp("norm2"))?; let proj = linear(in_channels, out_channels, vb.pp("proj")); let stride = 4; let mlp = hiera_mlp(out_channels, out_channels * 4, vb.pp("mlp"))?; let attn = hiera_attention( in_channels, out_channels, heads, q_stride, window_size, use_mask_attention, vb.pp("attn"), )?; Ok(Func::new(move |xs| { let mut xs = xs.clone(); let xs_norm = xs.apply_t(&norm1, false)?; if let Ok(p) = &proj { xs = xs_norm.apply(p)?; let (a, _, d) = xs.dims3()?; xs = xs.reshape((a, stride, (), d))?.max(1)?; } let xs = (xs + &xs_norm.apply(&attn)?)?; let xs = (&xs + &xs.apply_t(&norm2, false)?.apply(&mlp)?)?; Ok(xs) })) } fn hiera_blocks(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { let nblocks = cfg.stages.iter().sum(); let mut blocks = Vec::with_capacity(nblocks); let mut out_channels = cfg.channels; let mut in_channels = out_channels; let mut heads = cfg.heads; let mut b = 0; let mut q_stride = 1; let mut window_size = 64; for s in 0..4 { let use_mask_attention = s < 2; for _ in 0..cfg.stages[s] { blocks.push(hiera_block( heads, in_channels, out_channels, q_stride, window_size, use_mask_attention, vb.pp(b), )?); b += 1; in_channels = out_channels; q_stride = 1; } q_stride = 4; out_channels *= 2; heads *= 2; window_size /= 4; } Ok(Func::new(move |xs| { let mut xs = xs.clone(); for block in blocks.iter() { xs = xs.apply(block)? } Ok(xs) })) } fn hiera_head(outputs: usize, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { let norm = layer_norm(outputs, 1e-6, vb.pp("norm"))?; let linear = linear(outputs, nclasses, vb.pp("fc"))?; Ok(Func::new(move |xs| { xs.apply_t(&norm, false)?.apply(&linear) })) } // Build a hiera model for a given configuration. fn hiera_model(cfg: &Config, nclasses: Option<usize>, vb: VarBuilder) -> Result<Func<'static>> { let cls = match nclasses { None => None, Some(nclasses) => { let outputs = cfg.channels * 8; let head = hiera_head(outputs, nclasses, vb.pp("head"))?; Some(head) } }; let embeddings = hiera_embeddings(cfg.channels, vb.clone())?; let unroll = hiera_unroll()?; let blocks = hiera_blocks(cfg, vb.pp("blocks"))?; Ok(Func::new(move |xs| { let xs = xs .apply(&embeddings)? .apply(&unroll)? .apply(&blocks)? .mean(1)?; match &cls { None => Ok(xs), Some(cls) => xs.apply(cls), } })) } pub fn hiera(cfg: &Config, nclasses: usize, vb: VarBuilder) -> Result<Func<'static>> { hiera_model(cfg, Some(nclasses), vb) } pub fn hiera_no_final_layer(cfg: &Config, vb: VarBuilder) -> Result<Func<'static>> { hiera_model(cfg, None, vb) }
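// ---------------------------------------------------------------------------
// Editor's note: the helper below is an added construction sketch, not part of
// the original file. The weights path is a caller-provided placeholder; a
// converted timm checkpoint in safetensors format is assumed, with tensor names
// matching the `vb.pp(...)` paths used above ("patch_embed.proj", "blocks.N...",
// "head.fc", ...). Note that the per-block "proj" weights only exist at stage
// transitions in real checkpoints, which is what triggers the token pooling
// branch in `hiera_block`.
#[allow(dead_code)]
fn load_hiera_tiny(weights: &std::path::Path, nclasses: usize) -> Result<Func<'static>> {
    use candle::{DType, Device};
    let dev = Device::Cpu;
    // SAFETY: mmaps the weight file; it must not be mutated while the model is in use.
    let vb = unsafe { VarBuilder::from_mmaped_safetensors(&[weights], DType::F32, &dev)? };
    hiera(&Config::tiny(), nclasses, vb)
}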
candle/candle-transformers/src/models/hiera.rs/0
{ "file_path": "candle/candle-transformers/src/models/hiera.rs", "repo_id": "candle", "token_count": 4434 }
41
pub mod blocks; pub mod embedding; pub mod model; pub mod projections;
candle/candle-transformers/src/models/mmdit/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/mmdit/mod.rs", "repo_id": "candle", "token_count": 21 }
42
use super::quantized_blip_text as blip_text; use crate::quantized_nn::{layer_norm, linear, Linear}; pub use crate::quantized_var_builder::VarBuilder; use candle::{Module, Result, Tensor, D}; use candle_nn::{Conv2d, Conv2dConfig, LayerNorm}; pub type VisionConfig = super::blip::VisionConfig; pub type Config = super::blip::Config; #[derive(Debug, Clone)] struct VisionEmbeddings { class_embedding: Tensor, patch_embedding: Conv2d, position_embedding: Tensor, } impl VisionEmbeddings { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let class_embedding = vb .get((1, 1, cfg.hidden_size), "class_embedding")? .dequantize(vb.device())?; let conv_cfg = Conv2dConfig { stride: cfg.patch_size, ..Default::default() }; let pe_vb = vb.pp("patch_embedding"); let pe_weight = pe_vb .get( (cfg.hidden_size, 3, cfg.patch_size, cfg.patch_size), "weight", )? .dequantize(vb.device())?; let pe_bias = pe_vb .get(cfg.hidden_size, "bias")? .dequantize(vb.device())?; let patch_embedding = Conv2d::new(pe_weight, Some(pe_bias), conv_cfg); let num_patches1 = cfg.image_size / cfg.patch_size; let num_patches = num_patches1 * num_patches1; let num_positions = num_patches + 1; let position_embedding = vb .get((1, num_positions, cfg.hidden_size), "position_embedding")? .dequantize(vb.device())?; Ok(Self { class_embedding, patch_embedding, position_embedding, }) } } impl Module for VisionEmbeddings { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let target_dtype = xs.dtype(); let b_size = xs.dim(0)?; let patch_embeds = xs.apply(&self.patch_embedding)?.flatten_from(2)?.t()?; let d = self.class_embedding.dim(D::Minus1)?; let class_embeds = self .class_embedding .broadcast_as((b_size, 1, d))? .to_dtype(target_dtype)?; let embeddings = Tensor::cat(&[&class_embeds, &patch_embeds], 1)?; let position_embedding = self.position_embedding.narrow(1, 0, embeddings.dim(1)?)?; embeddings.broadcast_add(&position_embedding) } } #[derive(Debug, Clone)] struct Attention { qkv: Linear, projection: Linear, scale: f64, num_heads: usize, } impl Attention { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let num_heads = cfg.num_attention_heads; let head_dim = embed_dim / num_heads; let scale = 1f64 / (head_dim as f64).sqrt(); let qkv = linear(embed_dim, 3 * embed_dim, vb.pp("qkv"))?; let projection = linear(embed_dim, embed_dim, vb.pp("projection"))?; Ok(Self { qkv, projection, scale, num_heads, }) } fn forward(&self, xs: &Tensor, attn_mask: Option<&Tensor>) -> Result<Tensor> { let (b_sz, tgt_len, embed_dim) = xs.dims3()?; let mixed_qkv = xs .apply(&self.qkv)? .reshape((b_sz, tgt_len, 3, self.num_heads, embed_dim / self.num_heads))? .permute((2, 0, 3, 1, 4))?; let query = mixed_qkv.get(0)?; let key = mixed_qkv.get(1)?; let value = mixed_qkv.get(2)?; let attention_scores = query.matmul(&key.t()?)?; let attention_scores = (attention_scores * self.scale)?; let attention_probs = candle_nn::ops::softmax_last_dim(&attention_scores)?; let attention_probs = match attn_mask { None => attention_probs, Some(attn_mask) => (attention_probs * attn_mask)?, }; attention_probs .matmul(&value)? .permute((0, 2, 1, 3))? .flatten_from(D::Minus2)? 
.apply(&self.projection) } } #[derive(Debug, Clone)] #[allow(clippy::upper_case_acronyms)] struct MLP { activation_fn: candle_nn::Activation, fc1: Linear, fc2: Linear, } impl MLP { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let fc1 = linear(cfg.hidden_size, cfg.intermediate_size, vb.pp("fc1"))?; let fc2 = linear(cfg.intermediate_size, cfg.hidden_size, vb.pp("fc2"))?; Ok(Self { activation_fn: cfg.hidden_act, fc1, fc2, }) } } impl Module for MLP { fn forward(&self, xs: &Tensor) -> Result<Tensor> { xs.apply(&self.fc1)? .apply(&self.activation_fn)? .apply(&self.fc2) } } #[derive(Debug, Clone)] struct EncoderLayer { self_attn: Attention, layer_norm1: LayerNorm, mlp: MLP, layer_norm2: LayerNorm, } impl EncoderLayer { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embed_dim = cfg.hidden_size; let self_attn = Attention::new(cfg, vb.pp("self_attn"))?; let layer_norm1 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm1"))?; let layer_norm2 = layer_norm(embed_dim, cfg.layer_norm_eps, vb.pp("layer_norm2"))?; let mlp = MLP::new(cfg, vb.pp("mlp"))?; Ok(Self { self_attn, layer_norm1, mlp, layer_norm2, }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let residual = xs; let xs = xs.apply(&self.layer_norm1)?; let xs = self.self_attn.forward(&xs, attention_mask)?; let xs = (xs + residual)?; let residual = &xs; let xs = xs.apply(&self.layer_norm2)?.apply(&self.mlp)?; xs + residual } } #[derive(Debug, Clone)] struct Encoder { layers: Vec<EncoderLayer>, } impl Encoder { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let mut layers = Vec::with_capacity(cfg.num_hidden_layers); let vb = vb.pp("layers"); for i in 0..cfg.num_hidden_layers { let layer = EncoderLayer::new(cfg, vb.pp(i))?; layers.push(layer) } Ok(Self { layers }) } fn forward(&self, xs: &Tensor, attention_mask: Option<&Tensor>) -> Result<Tensor> { let mut xs = xs.clone(); for layer in self.layers.iter() { xs = layer.forward(&xs, attention_mask)? } Ok(xs) } } #[derive(Debug, Clone)] pub struct VisionModel { embeddings: VisionEmbeddings, encoder: Encoder, post_layernorm: LayerNorm, } impl VisionModel { fn new(cfg: &VisionConfig, vb: VarBuilder) -> Result<Self> { let embeddings = VisionEmbeddings::new(cfg, vb.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb.pp("encoder"))?; let post_layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb.pp("post_layernorm"))?; Ok(Self { embeddings, encoder, post_layernorm, }) } } impl Module for VisionModel { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = xs.apply(&self.embeddings)?; let encoder_outputs = self.encoder.forward(&xs, None)?; // Return the last hidden state rather than pooled outputs. encoder_outputs.apply(&self.post_layernorm) } } #[derive(Debug, Clone)] pub struct BlipForConditionalGeneration { vision_model: VisionModel, text_decoder: blip_text::TextLMHeadModel, } impl BlipForConditionalGeneration { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vision_model = VisionModel::new(&cfg.vision_config, vb.pp("vision_model"))?; let text_decoder = blip_text::TextLMHeadModel::new(&cfg.text_config, vb.pp("text_decoder"))?; Ok(Self { vision_model, text_decoder, }) } pub fn vision_model(&self) -> &VisionModel { &self.vision_model } pub fn text_decoder(&mut self) -> &mut blip_text::TextLMHeadModel { &mut self.text_decoder } pub fn reset_kv_cache(&mut self) { self.text_decoder.reset_kv_cache(); } }
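// ---------------------------------------------------------------------------
// Editor's note: the helper below is an added construction sketch, not part of
// the original file. The arguments are caller-provided placeholders: `config_json`
// is assumed to hold the standard BLIP `config.json` contents (deserialized
// through serde into `Config`), and `gguf_path` a GGUF export whose tensor names
// match the `vb.pp(...)` paths used above.
#[allow(dead_code)]
pub fn load_quantized_blip(
    config_json: &str,
    gguf_path: &std::path::Path,
    device: &candle::Device,
) -> Result<BlipForConditionalGeneration> {
    let cfg: Config = serde_json::from_str(config_json).map_err(candle::Error::wrap)?;
    let vb = VarBuilder::from_gguf(gguf_path, device)?;
    BlipForConditionalGeneration::new(&cfg, vb)
}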
candle/candle-transformers/src/models/quantized_blip.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_blip.rs", "repo_id": "candle", "token_count": 4013 }
43
//! Attention Based Building Blocks use candle::{DType, IndexOp, Result, Tensor, D}; use candle_nn as nn; use candle_nn::Module; #[derive(Debug)] struct GeGlu { proj: nn::Linear, span: tracing::Span, } impl GeGlu { fn new(vs: nn::VarBuilder, dim_in: usize, dim_out: usize) -> Result<Self> { let proj = nn::linear(dim_in, dim_out * 2, vs.pp("proj"))?; let span = tracing::span!(tracing::Level::TRACE, "geglu"); Ok(Self { proj, span }) } } impl Module for GeGlu { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let hidden_states_and_gate = self.proj.forward(xs)?.chunk(2, D::Minus1)?; &hidden_states_and_gate[0] * hidden_states_and_gate[1].gelu()? } } /// A feed-forward layer. #[derive(Debug)] struct FeedForward { project_in: GeGlu, linear: nn::Linear, span: tracing::Span, } impl FeedForward { // The glu parameter in the python code is unused? // https://github.com/huggingface/diffusers/blob/d3d22ce5a894becb951eec03e663951b28d45135/src/diffusers/models/attention.py#L347 /// Creates a new feed-forward layer based on some given input dimension, some /// output dimension, and a multiplier to be used for the intermediary layer. fn new(vs: nn::VarBuilder, dim: usize, dim_out: Option<usize>, mult: usize) -> Result<Self> { let inner_dim = dim * mult; let dim_out = dim_out.unwrap_or(dim); let vs = vs.pp("net"); let project_in = GeGlu::new(vs.pp("0"), dim, inner_dim)?; let linear = nn::linear(inner_dim, dim_out, vs.pp("2"))?; let span = tracing::span!(tracing::Level::TRACE, "ff"); Ok(Self { project_in, linear, span, }) } } impl Module for FeedForward { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let xs = self.project_in.forward(xs)?; self.linear.forward(&xs) } } #[cfg(feature = "flash-attn")] fn flash_attn( q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32, causal: bool, ) -> Result<Tensor> { candle_flash_attn::flash_attn(q, k, v, softmax_scale, causal) } #[cfg(not(feature = "flash-attn"))] fn flash_attn(_: &Tensor, _: &Tensor, _: &Tensor, _: f32, _: bool) -> Result<Tensor> { unimplemented!("compile with '--features flash-attn'") } #[derive(Debug)] pub struct CrossAttention { to_q: nn::Linear, to_k: nn::Linear, to_v: nn::Linear, to_out: nn::Linear, heads: usize, scale: f64, slice_size: Option<usize>, span: tracing::Span, span_attn: tracing::Span, span_softmax: tracing::Span, use_flash_attn: bool, } impl CrossAttention { // Defaults should be heads = 8, dim_head = 64, context_dim = None pub fn new( vs: nn::VarBuilder, query_dim: usize, context_dim: Option<usize>, heads: usize, dim_head: usize, slice_size: Option<usize>, use_flash_attn: bool, ) -> Result<Self> { let inner_dim = dim_head * heads; let context_dim = context_dim.unwrap_or(query_dim); let scale = 1.0 / f64::sqrt(dim_head as f64); let to_q = nn::linear_no_bias(query_dim, inner_dim, vs.pp("to_q"))?; let to_k = nn::linear_no_bias(context_dim, inner_dim, vs.pp("to_k"))?; let to_v = nn::linear_no_bias(context_dim, inner_dim, vs.pp("to_v"))?; let to_out = nn::linear(inner_dim, query_dim, vs.pp("to_out.0"))?; let span = tracing::span!(tracing::Level::TRACE, "xa"); let span_attn = tracing::span!(tracing::Level::TRACE, "xa-attn"); let span_softmax = tracing::span!(tracing::Level::TRACE, "xa-softmax"); Ok(Self { to_q, to_k, to_v, to_out, heads, scale, slice_size, span, span_attn, span_softmax, use_flash_attn, }) } fn reshape_heads_to_batch_dim(&self, xs: &Tensor) -> Result<Tensor> { let (batch_size, seq_len, dim) = xs.dims3()?; xs.reshape((batch_size, seq_len, self.heads, dim / 
self.heads))? .transpose(1, 2)? .reshape((batch_size * self.heads, seq_len, dim / self.heads)) } fn reshape_batch_dim_to_heads(&self, xs: &Tensor) -> Result<Tensor> { let (batch_size, seq_len, dim) = xs.dims3()?; xs.reshape((batch_size / self.heads, self.heads, seq_len, dim))? .transpose(1, 2)? .reshape((batch_size / self.heads, seq_len, dim * self.heads)) } fn sliced_attention( &self, query: &Tensor, key: &Tensor, value: &Tensor, slice_size: usize, ) -> Result<Tensor> { let batch_size_attention = query.dim(0)?; let mut hidden_states = Vec::with_capacity(batch_size_attention / slice_size); let in_dtype = query.dtype(); let query = query.to_dtype(DType::F32)?; let key = key.to_dtype(DType::F32)?; let value = value.to_dtype(DType::F32)?; for i in 0..batch_size_attention / slice_size { let start_idx = i * slice_size; let end_idx = (i + 1) * slice_size; let xs = query .i(start_idx..end_idx)? .matmul(&(key.i(start_idx..end_idx)?.t()? * self.scale)?)?; let xs = nn::ops::softmax(&xs, D::Minus1)?.matmul(&value.i(start_idx..end_idx)?)?; hidden_states.push(xs) } let hidden_states = Tensor::cat(&hidden_states, 0)?.to_dtype(in_dtype)?; self.reshape_batch_dim_to_heads(&hidden_states) } fn attention(&self, query: &Tensor, key: &Tensor, value: &Tensor) -> Result<Tensor> { let _enter = self.span_attn.enter(); let xs = if self.use_flash_attn { let init_dtype = query.dtype(); let q = query .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; let k = key .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; let v = value .to_dtype(candle::DType::F16)? .unsqueeze(0)? .transpose(1, 2)?; flash_attn(&q, &k, &v, self.scale as f32, false)? .transpose(1, 2)? .squeeze(0)? .to_dtype(init_dtype)? } else { let in_dtype = query.dtype(); let query = query.to_dtype(DType::F32)?; let key = key.to_dtype(DType::F32)?; let value = value.to_dtype(DType::F32)?; let xs = query.matmul(&(key.t()? * self.scale)?)?; let xs = { let _enter = self.span_softmax.enter(); nn::ops::softmax_last_dim(&xs)? }; xs.matmul(&value)?.to_dtype(in_dtype)? }; self.reshape_batch_dim_to_heads(&xs) } pub fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let query = self.to_q.forward(xs)?; let context = context.unwrap_or(xs).contiguous()?; let key = self.to_k.forward(&context)?; let value = self.to_v.forward(&context)?; let query = self.reshape_heads_to_batch_dim(&query)?; let key = self.reshape_heads_to_batch_dim(&key)?; let value = self.reshape_heads_to_batch_dim(&value)?; let dim0 = query.dim(0)?; let slice_size = self.slice_size.and_then(|slice_size| { if dim0 < slice_size { None } else { Some(slice_size) } }); let xs = match slice_size { None => self.attention(&query, &key, &value)?, Some(slice_size) => self.sliced_attention(&query, &key, &value, slice_size)?, }; self.to_out.forward(&xs) } } /// A basic Transformer block. 
#[derive(Debug)] struct BasicTransformerBlock { attn1: CrossAttention, ff: FeedForward, attn2: CrossAttention, norm1: nn::LayerNorm, norm2: nn::LayerNorm, norm3: nn::LayerNorm, span: tracing::Span, } impl BasicTransformerBlock { fn new( vs: nn::VarBuilder, dim: usize, n_heads: usize, d_head: usize, context_dim: Option<usize>, sliced_attention_size: Option<usize>, use_flash_attn: bool, ) -> Result<Self> { let attn1 = CrossAttention::new( vs.pp("attn1"), dim, None, n_heads, d_head, sliced_attention_size, use_flash_attn, )?; let ff = FeedForward::new(vs.pp("ff"), dim, None, 4)?; let attn2 = CrossAttention::new( vs.pp("attn2"), dim, context_dim, n_heads, d_head, sliced_attention_size, use_flash_attn, )?; let norm1 = nn::layer_norm(dim, 1e-5, vs.pp("norm1"))?; let norm2 = nn::layer_norm(dim, 1e-5, vs.pp("norm2"))?; let norm3 = nn::layer_norm(dim, 1e-5, vs.pp("norm3"))?; let span = tracing::span!(tracing::Level::TRACE, "basic-transformer"); Ok(Self { attn1, ff, attn2, norm1, norm2, norm3, span, }) } fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let xs = (self.attn1.forward(&self.norm1.forward(xs)?, None)? + xs)?; let xs = (self.attn2.forward(&self.norm2.forward(&xs)?, context)? + xs)?; self.ff.forward(&self.norm3.forward(&xs)?)? + xs } } #[derive(Debug, Clone, Copy)] pub struct SpatialTransformerConfig { pub depth: usize, pub num_groups: usize, pub context_dim: Option<usize>, pub sliced_attention_size: Option<usize>, pub use_linear_projection: bool, } impl Default for SpatialTransformerConfig { fn default() -> Self { Self { depth: 1, num_groups: 32, context_dim: None, sliced_attention_size: None, use_linear_projection: false, } } } #[derive(Debug)] enum Proj { Conv2d(nn::Conv2d), Linear(nn::Linear), } // Aka Transformer2DModel #[derive(Debug)] pub struct SpatialTransformer { norm: nn::GroupNorm, proj_in: Proj, transformer_blocks: Vec<BasicTransformerBlock>, proj_out: Proj, span: tracing::Span, pub config: SpatialTransformerConfig, } impl SpatialTransformer { pub fn new( vs: nn::VarBuilder, in_channels: usize, n_heads: usize, d_head: usize, use_flash_attn: bool, config: SpatialTransformerConfig, ) -> Result<Self> { let inner_dim = n_heads * d_head; let norm = nn::group_norm(config.num_groups, in_channels, 1e-6, vs.pp("norm"))?; let proj_in = if config.use_linear_projection { Proj::Linear(nn::linear(in_channels, inner_dim, vs.pp("proj_in"))?) } else { Proj::Conv2d(nn::conv2d( in_channels, inner_dim, 1, Default::default(), vs.pp("proj_in"), )?) }; let mut transformer_blocks = vec![]; let vs_tb = vs.pp("transformer_blocks"); for index in 0..config.depth { let tb = BasicTransformerBlock::new( vs_tb.pp(index.to_string()), inner_dim, n_heads, d_head, config.context_dim, config.sliced_attention_size, use_flash_attn, )?; transformer_blocks.push(tb) } let proj_out = if config.use_linear_projection { Proj::Linear(nn::linear(in_channels, inner_dim, vs.pp("proj_out"))?) } else { Proj::Conv2d(nn::conv2d( inner_dim, in_channels, 1, Default::default(), vs.pp("proj_out"), )?) 
}; let span = tracing::span!(tracing::Level::TRACE, "spatial-transformer"); Ok(Self { norm, proj_in, transformer_blocks, proj_out, span, config, }) } pub fn forward(&self, xs: &Tensor, context: Option<&Tensor>) -> Result<Tensor> { let _enter = self.span.enter(); let (batch, _channel, height, weight) = xs.dims4()?; let residual = xs; let xs = self.norm.forward(xs)?; let (inner_dim, xs) = match &self.proj_in { Proj::Conv2d(p) => { let xs = p.forward(&xs)?; let inner_dim = xs.dim(1)?; let xs = xs .transpose(1, 2)? .t()? .reshape((batch, height * weight, inner_dim))?; (inner_dim, xs) } Proj::Linear(p) => { let inner_dim = xs.dim(1)?; let xs = xs .transpose(1, 2)? .t()? .reshape((batch, height * weight, inner_dim))?; (inner_dim, p.forward(&xs)?) } }; let mut xs = xs; for block in self.transformer_blocks.iter() { xs = block.forward(&xs, context)? } let xs = match &self.proj_out { Proj::Conv2d(p) => p.forward( &xs.reshape((batch, height, weight, inner_dim))? .t()? .transpose(1, 2)?, )?, Proj::Linear(p) => p .forward(&xs)? .reshape((batch, height, weight, inner_dim))? .t()? .transpose(1, 2)?, }; xs + residual } } /// Configuration for an attention block. #[derive(Debug, Clone, Copy)] pub struct AttentionBlockConfig { pub num_head_channels: Option<usize>, pub num_groups: usize, pub rescale_output_factor: f64, pub eps: f64, } impl Default for AttentionBlockConfig { fn default() -> Self { Self { num_head_channels: None, num_groups: 32, rescale_output_factor: 1., eps: 1e-5, } } } #[derive(Debug)] pub struct AttentionBlock { group_norm: nn::GroupNorm, query: nn::Linear, key: nn::Linear, value: nn::Linear, proj_attn: nn::Linear, channels: usize, num_heads: usize, span: tracing::Span, config: AttentionBlockConfig, } impl AttentionBlock { pub fn new(vs: nn::VarBuilder, channels: usize, config: AttentionBlockConfig) -> Result<Self> { let num_head_channels = config.num_head_channels.unwrap_or(channels); let num_heads = channels / num_head_channels; let group_norm = nn::group_norm(config.num_groups, channels, config.eps, vs.pp("group_norm"))?; let (q_path, k_path, v_path, out_path) = if vs.contains_tensor("to_q.weight") { ("to_q", "to_k", "to_v", "to_out.0") } else { ("query", "key", "value", "proj_attn") }; let query = nn::linear(channels, channels, vs.pp(q_path))?; let key = nn::linear(channels, channels, vs.pp(k_path))?; let value = nn::linear(channels, channels, vs.pp(v_path))?; let proj_attn = nn::linear(channels, channels, vs.pp(out_path))?; let span = tracing::span!(tracing::Level::TRACE, "attn-block"); Ok(Self { group_norm, query, key, value, proj_attn, channels, num_heads, span, config, }) } fn transpose_for_scores(&self, xs: Tensor) -> Result<Tensor> { let (batch, t, h_times_d) = xs.dims3()?; xs.reshape((batch, t, self.num_heads, h_times_d / self.num_heads))? .transpose(1, 2) } } impl Module for AttentionBlock { fn forward(&self, xs: &Tensor) -> Result<Tensor> { let _enter = self.span.enter(); let in_dtype = xs.dtype(); let residual = xs; let (batch, channel, height, width) = xs.dims4()?; let xs = self .group_norm .forward(xs)? .reshape((batch, channel, height * width))? .transpose(1, 2)?; let query_proj = self.query.forward(&xs)?; let key_proj = self.key.forward(&xs)?; let value_proj = self.value.forward(&xs)?; let query_states = self .transpose_for_scores(query_proj)? .to_dtype(DType::F32)?; let key_states = self.transpose_for_scores(key_proj)?.to_dtype(DType::F32)?; let value_states = self .transpose_for_scores(value_proj)? 
.to_dtype(DType::F32)?; // scale is applied twice, hence the -0.25 here rather than -0.5. // https://github.com/huggingface/diffusers/blob/d3d22ce5a894becb951eec03e663951b28d45135/src/diffusers/models/attention.py#L87 let scale = f64::powf(self.channels as f64 / self.num_heads as f64, -0.25); let attention_scores = (query_states * scale)?.matmul(&(key_states.t()? * scale)?)?; let attention_probs = nn::ops::softmax(&attention_scores, D::Minus1)?; // TODO: revert the call to force_contiguous once the three matmul kernels have been // adapted to handle layout with some dims set to 1. let xs = attention_probs.matmul(&value_states)?; let xs = xs.to_dtype(in_dtype)?; let xs = xs.transpose(1, 2)?.contiguous()?; let xs = xs.flatten_from(D::Minus2)?; let xs = self .proj_attn .forward(&xs)? .t()? .reshape((batch, channel, height, width))?; (xs + residual)? / self.config.rescale_output_factor } }
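// ---------------------------------------------------------------------------
// Editor's note: the test below is an added shape-level sketch, not part of the
// original file. Zeroed weights stand in for a real checkpoint; the check is
// only that the Transformer2DModel-style block is shape preserving, with
// n_heads * d_head == in_channels as in the Stable Diffusion configurations.
#[cfg(test)]
mod spatial_transformer_shape_tests {
    use super::*;
    use candle::{DType, Device};

    #[test]
    fn spatial_transformer_preserves_shape() -> Result<()> {
        let dev = Device::Cpu;
        let vs = nn::VarBuilder::zeros(DType::F32, &dev);
        let cfg = SpatialTransformerConfig::default();
        // in_channels = 32, n_heads = 4, d_head = 8, so inner_dim == in_channels.
        let st = SpatialTransformer::new(vs, 32, 4, 8, false, cfg)?;
        let xs = Tensor::zeros((1, 32, 8, 8), DType::F32, &dev)?;
        let ys = st.forward(&xs, None)?;
        assert_eq!(ys.dims(), xs.dims());
        Ok(())
    }
}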
candle/candle-transformers/src/models/stable_diffusion/attention.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/attention.rs", "repo_id": "candle", "token_count": 9458 }
44
use crate::models::vit::{Config, Embeddings, Encoder}; use candle::{DType, Result, Tensor}; use candle_nn::{ embedding, layer_norm, linear_no_bias, Embedding, LayerNorm, Linear, Module, VarBuilder, }; fn default_tie_word_embeddings() -> bool { true } fn default_use_learned_position_embeddings() -> bool { true } #[derive(Debug, Clone, PartialEq, serde::Deserialize)] pub struct TrOCRConfig { pub vocab_size: usize, pub d_model: usize, pub cross_attention_hidden_size: usize, pub decoder_layers: usize, pub decoder_attention_heads: usize, pub decoder_ffn_dim: usize, pub activation_function: candle_nn::Activation, pub max_position_embeddings: usize, pub dropout: f64, pub attention_dropout: f64, pub activation_dropout: f64, pub decoder_start_token_id: u32, pub init_std: f64, pub decoder_layerdrop: f64, pub use_cache: bool, pub scale_embedding: bool, pub pad_token_id: usize, pub bos_token_id: usize, pub eos_token_id: u32, pub decoder_vocab_size: Option<usize>, #[serde(default = "default_use_learned_position_embeddings")] pub use_learned_position_embeddings: bool, #[serde(default = "default_tie_word_embeddings")] pub tie_word_embeddings: bool, } impl Default for TrOCRConfig { fn default() -> Self { Self { vocab_size: 50265, d_model: 1024, cross_attention_hidden_size: 768, decoder_layers: 12, decoder_attention_heads: 16, decoder_ffn_dim: 4096, activation_function: candle_nn::Activation::Gelu, max_position_embeddings: 512, dropout: 0.1, attention_dropout: 0.0, activation_dropout: 0.0, decoder_start_token_id: 2, init_std: 0.02, decoder_layerdrop: 0.0, use_cache: true, scale_embedding: false, pad_token_id: 1, bos_token_id: 0, eos_token_id: 2, decoder_vocab_size: Some(50265), use_learned_position_embeddings: true, tie_word_embeddings: true, } } } #[derive(Debug, Clone)] struct TrOCRLearnedPositionalEmbedding { offset: usize, weights: Embedding, } impl TrOCRLearnedPositionalEmbedding { fn load(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> { let offset: usize = 2; let num_embeddings = cfg.max_position_embeddings; let embedding_dim = cfg.d_model; let weights = embedding(num_embeddings + offset, embedding_dim, vb)?; Ok(Self { offset, weights }) } fn new_sinusoidal(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> { // https://github.com/huggingface/transformers/blob/58e3d23e97078f361a533b9ec4a6a2de674ea52a/src/transformers/models/trocr/modeling_trocr.py#L81 let embedding_dim = cfg.d_model; let half_dim = embedding_dim / 2; let num_positions = cfg.max_position_embeddings + cfg.pad_token_id + 1; let dev = vb.device(); let inv_freq: Vec<_> = (0..half_dim) .map(|i| 1f32 / 10000f32.powf(i as f32 / (half_dim - 1) as f32)) .collect(); let inv_freq_len = inv_freq.len(); let inv_freq = Tensor::from_vec(inv_freq, (1, inv_freq_len), dev)?; let t = Tensor::arange(0u32, num_positions as u32, dev)? .to_dtype(DType::F32)? .reshape((num_positions, 1))?; let freqs = t.matmul(&inv_freq)?; let emb = Tensor::cat(&[freqs.sin()?, freqs.cos()?], 1)?; let emb = Tensor::cat( &[ emb.narrow(0, 0, cfg.pad_token_id)?, Tensor::zeros((1, embedding_dim), DType::F32, dev)?, emb.narrow(0, cfg.pad_token_id + 1, cfg.max_position_embeddings)?, ], 0, )? .contiguous()?; let emb = Embedding::new(emb, embedding_dim); Ok(Self { offset: cfg.pad_token_id + 1, weights: emb, }) } fn forward(&mut self, input_ids: &Tensor, past_key_values_length: u32) -> Result<Tensor> { let (b_sz, seq_len) = input_ids.dims2()?; let positions = Tensor::arange( past_key_values_length, seq_len as u32 + past_key_values_length, input_ids.device(), )? 
.expand((b_sz, seq_len))?; let positions = positions.broadcast_add(&Tensor::new(self.offset as u32, input_ids.device())?)?; self.weights.forward(&positions) } } #[derive(Debug, Clone)] struct TrOCRAttention { head_dim: usize, num_heads: usize, is_decoder: bool, scaling: f64, k_proj: Linear, v_proj: Linear, q_proj: Linear, out_proj: Linear, kv_cache: Option<(Tensor, Tensor)>, } impl TrOCRAttention { fn load( vb: VarBuilder, cfg: &TrOCRConfig, kdim: Option<usize>, vdim: Option<usize>, ) -> Result<Self> { let embed_dim = cfg.d_model; let num_heads = cfg.decoder_attention_heads; let head_dim = embed_dim / num_heads; let kdim = kdim.unwrap_or(embed_dim); let vdim = vdim.unwrap_or(embed_dim); let k_proj = linear_no_bias(kdim, embed_dim, vb.pp("k_proj"))?; let v_proj = linear_no_bias(vdim, embed_dim, vb.pp("v_proj"))?; let q_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("q_proj"))?; let out_proj = linear_no_bias(embed_dim, embed_dim, vb.pp("out_proj"))?; Ok(Self { head_dim, num_heads, is_decoder: true, scaling: 1. / (head_dim as f64).sqrt(), k_proj, v_proj, q_proj, out_proj, kv_cache: None, }) } fn reset_kv_cache(&mut self) { self.kv_cache = None } fn _shape(&self, tensor: &Tensor, bsz: usize) -> Result<Tensor> { tensor .reshape((bsz, (), self.num_heads, self.head_dim))? .transpose(1, 2)? .contiguous() } fn forward( &mut self, xs: &Tensor, kv_states: Option<&Tensor>, attn_mask: Option<&Tensor>, ) -> Result<Tensor> { let (b_sz, tgt_len, _) = xs.dims3()?; let query_states = (xs.apply(&self.q_proj)? * self.scaling)?; let (key_states, value_states) = match kv_states { None => { let key_states = self._shape(&xs.apply(&self.k_proj)?, b_sz)?; let value_states = self._shape(&xs.apply(&self.v_proj)?, b_sz)?; if self.is_decoder { let kv_states = match &self.kv_cache { None => (key_states, value_states), Some((p_key_states, p_value_states)) => { let key_states = Tensor::cat(&[p_key_states, &key_states], 2)?; let value_states = Tensor::cat(&[p_value_states, &value_states], 2)?; (key_states, value_states) } }; self.kv_cache = Some(kv_states.clone()); kv_states } else { (key_states, value_states) } } Some(kv_states) => { let key_states = self._shape(&kv_states.apply(&self.k_proj)?, b_sz)?; let value_states = self._shape(&kv_states.apply(&self.v_proj)?, b_sz)?; (key_states, value_states) } }; let proj_shape = (b_sz * self.num_heads, (), self.head_dim); let query_states = self._shape(&query_states, b_sz)?.reshape(proj_shape)?; let key_states = key_states.reshape(proj_shape)?; let value_states = value_states.reshape(proj_shape)?; let attn_weights = query_states.matmul(&key_states.transpose(1, 2)?)?; let attn_weights = match attn_mask { None => attn_weights, Some(attn_mask) => attn_weights.broadcast_add(attn_mask)?, }; let attn_probs = candle_nn::ops::softmax_last_dim(&attn_weights)?; let attn_output = attn_probs.matmul(&value_states)?; attn_output .reshape((b_sz, self.num_heads, tgt_len, self.head_dim))? .transpose(1, 2)? .reshape((b_sz, tgt_len, self.head_dim * self.num_heads))? 
.apply(&self.out_proj) } } #[derive(Debug, Clone)] struct TrOCRDecoderLayer { self_attn: TrOCRAttention, activation_fn: candle_nn::Activation, self_attn_layer_norm: LayerNorm, encoder_attn: TrOCRAttention, encoder_attn_layer_norm: LayerNorm, fc1: Linear, fc2: Linear, final_layer_norm: LayerNorm, } impl TrOCRDecoderLayer { fn load(vb: VarBuilder, cfg: &TrOCRConfig) -> Result<Self> { let embed_dim = cfg.d_model; let self_attn = TrOCRAttention::load(vb.pp("self_attn"), cfg, None, None)?; let self_attn_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("self_attn_layer_norm"))?; let encoder_attn = TrOCRAttention::load( vb.pp("encoder_attn"), cfg, Some(cfg.cross_attention_hidden_size), Some(cfg.cross_attention_hidden_size), )?; let encoder_attn_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("encoder_attn_layer_norm"))?; let fc1 = linear_no_bias(embed_dim, cfg.decoder_ffn_dim, vb.pp("fc1"))?; let fc2 = linear_no_bias(cfg.decoder_ffn_dim, embed_dim, vb.pp("fc2"))?; let final_layer_norm = layer_norm(embed_dim, 1e-5, vb.pp("final_layer_norm"))?; Ok(Self { self_attn, activation_fn: cfg.activation_function, self_attn_layer_norm, encoder_attn, encoder_attn_layer_norm, fc1, fc2, final_layer_norm, }) } fn reset_kv_cache(&mut self) { self.self_attn.reset_kv_cache(); } fn forward( &mut self, xs: &Tensor, attention_mask: &Tensor, encoder_hidden_states: Option<&Tensor>, ) -> Result<Tensor> { let residual = xs.clone(); let xs = self.self_attn.forward(xs, None, Some(attention_mask))?; let xs = (xs + residual)?; let mut xs = self.self_attn_layer_norm.forward(&xs)?; if let Some(encoder_hidden_states) = &encoder_hidden_states { let residual = xs.clone(); let encoder_attention_mask = attention_mask.clone(); // TODO xs = self.encoder_attn.forward( &xs, Some(encoder_hidden_states), Some(&encoder_attention_mask), )?; xs = (xs + residual)?; xs = self.encoder_attn_layer_norm.forward(&xs)? } let residual = xs.clone(); let xs = self.fc1.forward(&xs)?; let xs = self.activation_fn.forward(&xs)?; let xs = self.fc2.forward(&xs)?; let xs = (xs + residual)?; let xs = self.final_layer_norm.forward(&xs)?; Ok(xs) } } #[derive(Debug, Clone)] pub struct TrOCRDecoder { layers: Vec<TrOCRDecoderLayer>, embed_scale: Option<f64>, embed_tokens: Embedding, embed_positions: TrOCRLearnedPositionalEmbedding, } impl TrOCRDecoder { fn new(cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let vb = vb.pp("decoder.model.decoder"); let embed_tokens = embedding(cfg.vocab_size, cfg.d_model, vb.pp("embed_tokens"))?; let embed_positions = if cfg.use_learned_position_embeddings { TrOCRLearnedPositionalEmbedding::load(vb.pp("embed_positions"), cfg)? } else { TrOCRLearnedPositionalEmbedding::new_sinusoidal(vb.pp("embed_positions"), cfg)? 
}; let mut layers = Vec::with_capacity(cfg.decoder_layers); let vb_l = vb.pp("layers"); for idx in 0..cfg.decoder_layers { let layer = TrOCRDecoderLayer::load(vb_l.pp(idx), cfg)?; layers.push(layer) } let embed_scale = if cfg.scale_embedding { Some((cfg.d_model as f64).sqrt()) } else { None }; Ok(Self { layers, embed_scale, embed_tokens, embed_positions, }) } fn reset_kv_cache(&mut self) { self.layers.iter_mut().for_each(|l| l.reset_kv_cache()) } pub fn forward( &mut self, xs: &Tensor, encoder_xs: Option<&Tensor>, past_kv_len: usize, attn_mask: &Tensor, ) -> Result<Tensor> { let embed_pos = self.embed_positions.forward(xs, past_kv_len as u32)?; let xs = xs.apply(&self.embed_tokens)?; let xs = match self.embed_scale { None => xs, Some(scale) => (xs * scale)?, }; let mut xs = xs.broadcast_add(&embed_pos)?; for layer in self.layers.iter_mut() { xs = layer.forward(&xs, attn_mask, encoder_xs)?; } Ok(xs) } } #[derive(Debug, Clone)] pub struct TrOCREncoder { embeddings: Embeddings, encoder: Encoder, layernorm: LayerNorm, } impl TrOCREncoder { pub fn new(cfg: &Config, vb: VarBuilder) -> Result<Self> { let vb_v = vb.pp("encoder"); let embeddings = Embeddings::new(cfg, false, vb_v.pp("embeddings"))?; let encoder = Encoder::new(cfg, vb_v.pp("encoder"))?; let layernorm = layer_norm(cfg.hidden_size, cfg.layer_norm_eps, vb_v.pp("layernorm"))?; Ok(Self { embeddings, encoder, layernorm, }) } pub fn forward(&self, xs: &Tensor) -> Result<Tensor> { let embedding_output = self.embeddings.forward(xs, None, false)?; let encoder_outputs = self.encoder.forward(&embedding_output)?; self.layernorm.forward(&encoder_outputs) } } #[derive(Debug, Clone)] pub struct TrOCRForCausalLM { decoder: TrOCRDecoder, output_projection: Linear, } impl TrOCRForCausalLM { pub fn new(decoder_cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let decoder = TrOCRDecoder::new(decoder_cfg, vb.clone())?; let output_projection = if decoder_cfg.tie_word_embeddings { candle_nn::Linear::new(decoder.embed_tokens.embeddings().clone(), None) } else { candle_nn::linear_no_bias( decoder_cfg.d_model, decoder_cfg.vocab_size, vb.pp("decoder.output_projection"), )? }; Ok(Self { decoder, output_projection, }) } pub fn forward( &mut self, xs: &Tensor, encoder_xs: Option<&Tensor>, past_kv_len: usize, attn_mask: &Tensor, ) -> Result<Tensor> { let xs = self .decoder .forward(xs, encoder_xs, past_kv_len, attn_mask)?; let xs = xs.apply(&self.output_projection)?; Ok(xs) } fn reset_kv_cache(&mut self) { self.decoder.reset_kv_cache(); } } #[derive(Debug, Clone)] pub struct TrOCRModel { encoder: TrOCREncoder, decoder: TrOCRForCausalLM, } impl TrOCRModel { pub fn new(encoder_cfg: &Config, decoder_cfg: &TrOCRConfig, vb: VarBuilder) -> Result<Self> { let encoder = TrOCREncoder::new(encoder_cfg, vb.clone())?; let decoder = TrOCRForCausalLM::new(decoder_cfg, vb)?; Ok(Self { encoder, decoder }) } pub fn encoder(&mut self) -> &mut TrOCREncoder { &mut self.encoder } pub fn decoder(&mut self) -> &mut TrOCRForCausalLM { &mut self.decoder } pub fn decode( &mut self, xs: &Tensor, encoder_xs: &Tensor, past_kv_len: usize, ) -> Result<Tensor> { let seq_len = xs.dim(1)?; let mask: Vec<_> = (0..seq_len) .flat_map(|i| (0..seq_len).map(move |j| if j > i { f32::NEG_INFINITY } else { 0f32 })) .collect(); let mask = Tensor::from_vec(mask, (seq_len, seq_len), xs.device())?; self.decoder .forward(xs, Some(encoder_xs), past_kv_len, &mask) } pub fn reset_kv_cache(&mut self) { self.decoder.reset_kv_cache(); } }
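// --- Added example (not part of the original file) ---
// A minimal greedy-decoding sketch showing how the pieces above fit together.
// It assumes a `VarBuilder` already populated from TrOCR weights and an image
// tensor preprocessed to the shape expected by the ViT encoder; both inputs are
// hypothetical here. A real pipeline would typically sample with a LogitsProcessor
// rather than taking the argmax.
fn run_trocr_example(vb: VarBuilder, encoder_cfg: &Config, image: &Tensor) -> Result<Vec<u32>> {
    let decoder_cfg = TrOCRConfig::default();
    let mut model = TrOCRModel::new(encoder_cfg, &decoder_cfg, vb)?;
    // Encode the image once; the decoder cross-attends to this at every step.
    let encoder_xs = model.encoder().forward(image)?;
    let mut tokens = vec![decoder_cfg.decoder_start_token_id];
    for step in 0..256 {
        // Thanks to the kv cache, only the last token needs to be fed after the first step.
        let context_size = if step > 0 { 1 } else { tokens.len() };
        let start_pos = tokens.len().saturating_sub(context_size);
        let xs = Tensor::new(&tokens[start_pos..], image.device())?.unsqueeze(0)?;
        let logits = model.decode(&xs, &encoder_xs, start_pos)?.squeeze(0)?;
        // Greedy pick from the logits of the last position.
        let last = logits.get(logits.dim(0)? - 1)?;
        let next = last.argmax(0)?.to_scalar::<u32>()?;
        if next == decoder_cfg.eos_token_id {
            break;
        }
        tokens.push(next);
    }
    Ok(tokens)
}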
candle/candle-transformers/src/models/trocr.rs/0
{ "file_path": "candle/candle-transformers/src/models/trocr.rs", "repo_id": "candle", "token_count": 8465 }
45
/// A bounding box around an object. #[derive(Debug, Clone)] pub struct Bbox<D> { pub xmin: f32, pub ymin: f32, pub xmax: f32, pub ymax: f32, pub confidence: f32, pub data: D, } #[derive(Debug, Clone, Copy, PartialEq)] pub struct KeyPoint { pub x: f32, pub y: f32, pub mask: f32, } /// Intersection over union of two bounding boxes. pub fn iou<D>(b1: &Bbox<D>, b2: &Bbox<D>) -> f32 { let b1_area = (b1.xmax - b1.xmin + 1.) * (b1.ymax - b1.ymin + 1.); let b2_area = (b2.xmax - b2.xmin + 1.) * (b2.ymax - b2.ymin + 1.); let i_xmin = b1.xmin.max(b2.xmin); let i_xmax = b1.xmax.min(b2.xmax); let i_ymin = b1.ymin.max(b2.ymin); let i_ymax = b1.ymax.min(b2.ymax); let i_area = (i_xmax - i_xmin + 1.).max(0.) * (i_ymax - i_ymin + 1.).max(0.); i_area / (b1_area + b2_area - i_area) } pub fn non_maximum_suppression<D>(bboxes: &mut [Vec<Bbox<D>>], threshold: f32) { // Perform non-maximum suppression. for bboxes_for_class in bboxes.iter_mut() { bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap()); let mut current_index = 0; for index in 0..bboxes_for_class.len() { let mut drop = false; for prev_index in 0..current_index { let iou = iou(&bboxes_for_class[prev_index], &bboxes_for_class[index]); if iou > threshold { drop = true; break; } } if !drop { bboxes_for_class.swap(current_index, index); current_index += 1; } } bboxes_for_class.truncate(current_index); } } // Updates confidences starting at highest and comparing subsequent boxes. fn update_confidences<D>( bboxes_for_class: &[Bbox<D>], updated_confidences: &mut [f32], iou_threshold: f32, sigma: f32, ) { let len = bboxes_for_class.len(); for current_index in 0..len { let current_bbox = &bboxes_for_class[current_index]; for index in (current_index + 1)..len { let iou_val = iou(current_bbox, &bboxes_for_class[index]); if iou_val > iou_threshold { // Decay calculation from page 4 of: https://arxiv.org/pdf/1704.04503 let decay = (-iou_val * iou_val / sigma).exp(); let updated_confidence = bboxes_for_class[index].confidence * decay; updated_confidences[index] = updated_confidence; } } } } // Sorts the bounding boxes by confidence and applies soft non-maximum suppression. // This function is based on the algorithm described in https://arxiv.org/pdf/1704.04503 pub fn soft_non_maximum_suppression<D>( bboxes: &mut [Vec<Bbox<D>>], iou_threshold: Option<f32>, confidence_threshold: Option<f32>, sigma: Option<f32>, ) { let iou_threshold = iou_threshold.unwrap_or(0.5); let confidence_threshold = confidence_threshold.unwrap_or(0.1); let sigma = sigma.unwrap_or(0.5); for bboxes_for_class in bboxes.iter_mut() { // Sort boxes by confidence in descending order bboxes_for_class.sort_by(|b1, b2| b2.confidence.partial_cmp(&b1.confidence).unwrap()); let mut updated_confidences = bboxes_for_class .iter() .map(|bbox| bbox.confidence) .collect::<Vec<_>>(); update_confidences( bboxes_for_class, &mut updated_confidences, iou_threshold, sigma, ); // Update confidences, set to 0.0 if below threshold for (i, &confidence) in updated_confidences.iter().enumerate() { bboxes_for_class[i].confidence = if confidence < confidence_threshold { 0.0 } else { confidence }; } } }
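// --- Added example (not part of the original file) ---
// A small sketch of how the helpers above are meant to be used: one `Vec` of
// boxes per class, pruned in place with the hard-NMS variant. The coordinates
// and confidences are made up for illustration.
fn nms_example() {
    let mut bboxes: Vec<Vec<Bbox<()>>> = vec![vec![
        Bbox { xmin: 10., ymin: 10., xmax: 110., ymax: 110., confidence: 0.9, data: () },
        Bbox { xmin: 12., ymin: 8., xmax: 115., ymax: 105., confidence: 0.6, data: () },
        Bbox { xmin: 300., ymin: 300., xmax: 400., ymax: 380., confidence: 0.8, data: () },
    ]];
    // The first two boxes overlap heavily (iou > 0.5), so the lower-confidence
    // one is dropped and two boxes survive.
    non_maximum_suppression(&mut bboxes, 0.5);
    assert_eq!(bboxes[0].len(), 2);
}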
candle/candle-transformers/src/object_detection.rs/0
{ "file_path": "candle/candle-transformers/src/object_detection.rs", "repo_id": "candle", "token_count": 1884 }
46
use candle::{Device, Tensor}; use candle_transformers::generation::LogitsProcessor; use candle_wasm_example_llama2::worker::{Model as M, ModelData}; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct Model { inner: M, logits_processor: LogitsProcessor, tokens: Vec<u32>, repeat_penalty: f32, } impl Model { fn process(&mut self, tokens: &[u32]) -> candle::Result<String> { const REPEAT_LAST_N: usize = 64; let dev = Device::Cpu; let input = Tensor::new(tokens, &dev)?.unsqueeze(0)?; let logits = self.inner.llama.forward(&input, tokens.len())?; let logits = logits.squeeze(0)?; let logits = if self.repeat_penalty == 1. || tokens.is_empty() { logits } else { let start_at = self.tokens.len().saturating_sub(REPEAT_LAST_N); candle_transformers::utils::apply_repeat_penalty( &logits, self.repeat_penalty, &self.tokens[start_at..], )? }; let next_token = self.logits_processor.sample(&logits)?; self.tokens.push(next_token); let text = match self.inner.tokenizer.id_to_token(next_token) { Some(text) => text.replace('▁', " ").replace("<0x0A>", "\n"), None => "".to_string(), }; Ok(text) } } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn new(weights: Vec<u8>, tokenizer: Vec<u8>) -> Result<Model, JsError> { let model = M::load(ModelData { tokenizer, model: weights, }); let logits_processor = LogitsProcessor::new(299792458, None, None); match model { Ok(inner) => Ok(Self { inner, logits_processor, tokens: vec![], repeat_penalty: 1., }), Err(e) => Err(JsError::new(&e.to_string())), } } #[wasm_bindgen] pub fn get_seq_len(&mut self) -> usize { self.inner.config.seq_len } #[wasm_bindgen] pub fn init_with_prompt( &mut self, prompt: String, temp: f64, top_p: f64, repeat_penalty: f32, seed: u64, ) -> Result<String, JsError> { // First reset the cache. { let mut cache = self.inner.cache.kvs.lock().unwrap(); for elem in cache.iter_mut() { *elem = None } } let temp = if temp <= 0. { None } else { Some(temp) }; let top_p = if top_p <= 0. || top_p >= 1. { None } else { Some(top_p) }; self.logits_processor = LogitsProcessor::new(seed, temp, top_p); self.repeat_penalty = repeat_penalty; self.tokens.clear(); let tokens = self .inner .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let text = self .process(&tokens) .map_err(|m| JsError::new(&m.to_string()))?; Ok(text) } #[wasm_bindgen] pub fn next_token(&mut self) -> Result<String, JsError> { let last_token = *self.tokens.last().unwrap(); let text = self .process(&[last_token]) .map_err(|m| JsError::new(&m.to_string()))?; Ok(text) } } fn main() {}
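// --- Added example (not part of the original file) ---
// A hedged usage sketch. In the browser these calls go through the generated
// wasm-bindgen JS bindings, but the flow is the same: construct the model from
// raw weight/tokenizer bytes, prime it with a prompt, then pull tokens one by
// one. The byte buffers and the sampling settings here are assumed, not part
// of the original.
fn generate_example(weights: Vec<u8>, tokenizer: Vec<u8>) -> Result<String, JsError> {
    let mut model = Model::new(weights, tokenizer)?;
    // prompt, temperature, top_p, repeat_penalty, seed
    let mut output = model.init_with_prompt("Once upon a time".to_string(), 0.8, 0.95, 1.1, 42)?;
    // Generate up to 64 more tokens (bounded by the model's sequence length).
    for _ in 0..model.get_seq_len().min(64) {
        output += &model.next_token()?;
    }
    Ok(output)
}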
candle/candle-wasm-examples/llama2-c/src/bin/m.rs/0
{ "file_path": "candle/candle-wasm-examples/llama2-c/src/bin/m.rs", "repo_id": "candle", "token_count": 1807 }
47
<!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>Candle Phi 1.5 / Phi 2.0 Rust/WASM</title> <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/highlightjs/cdn-release@11.8.0/build/styles/default.min.css" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } code, output, select, pre { font-family: "Source Code Pro", monospace; } </style> <style type="text/tailwindcss"> .link { @apply underline hover:text-blue-500 hover:no-underline; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> import snarkdown from "https://cdn.skypack.dev/snarkdown"; import hljs from "https://cdn.skypack.dev/highlight.js"; // models base url const MODELS = { phi_1_5_q4k: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-q4k.gguf", tokenizer: "tokenizer.json", config: "phi-1_5.json", quantized: true, seq_len: 2048, size: "800 MB", }, phi_1_5_q80: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-q80.gguf", tokenizer: "tokenizer.json", config: "phi-1_5.json", quantized: true, seq_len: 2048, size: "1.51 GB", }, phi_2_0_q4k: { base_url: "https://huggingface.co/radames/phi-2-quantized/resolve/main/", model: [ "model-v2-q4k.gguf_aa.part", "model-v2-q4k.gguf_ab.part", "model-v2-q4k.gguf_ac.part", ], tokenizer: "tokenizer.json", config: "config.json", quantized: true, seq_len: 2048, size: "1.57GB", }, puffin_phi_v2_q4k: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-puffin-phi-v2-q4k.gguf", tokenizer: "tokenizer-puffin-phi-v2.json", config: "puffin-phi-v2.json", quantized: true, seq_len: 2048, size: "798 MB", }, puffin_phi_v2_q80: { base_url: "https://huggingface.co/lmz/candle-quantized-phi/resolve/main/", model: "model-puffin-phi-v2-q80.gguf", tokenizer: "tokenizer-puffin-phi-v2.json", config: "puffin-phi-v2.json", quantized: true, seq_len: 2048, size: "1.50 GB", }, }; const TEMPLATES = [ { title: "Simple prompt", prompt: `Sebastien is in London today, it’s the middle of July yet it’s raining, so Sebastien is feeling gloomy. He`, }, { title: "Think step by step", prompt: `Suppose Alice originally had 3 apples, then Bob gave Alice 7 apples, then Alice gave Cook 5 apples, and then Tim gave Alice 3x the amount of apples Alice had. How many apples does Alice have now? Let’s think step by step.`, }, { title: "Explaining a code snippet", prompt: `What does this script do? \`\`\`python s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) s.listen(1) conn, addr = s.accept() print('Connected by', addr) return conn.getsockname()[1] \`\`\` Let’s think step by step.`, }, { title: "Question answering", prompt: `Instruct: What is the capital of France? Output:`, }, { title: "Chat mode", prompt: `Alice: Can you tell me how to create a python application to go through all the files in one directory where the file’s name DOES NOT end with '.json'?
Bob:`, }, { title: "Python code completion", prompt: `"""write a python function called batch(function, list) which call function(x) for x in list in parallel""" Solution:`, }, { title: "Python Sample", prompt: `"""Can you make sure those histograms appear side by side on the same plot: \`\`\`python plt.hist(intreps_retrained[0][1].view(64,-1).norm(dim=1).detach().cpu().numpy(), bins = 20) plt.hist(intreps_pretrained[0][1].view(64,-1).norm(dim=1).detach().cpu().numpy(), bins = 20) \`\`\` """`, }, { title: "Write a Twitter post", prompt: `Write a twitter post for the discovery of gravitational wave. Twitter Post:`, }, { title: "Write a review", prompt: `Write a polite review complaining that the video game 'Random Game' was too badly optimized and it burned my laptop. Very polite review:`, }, ]; const phiWorker = new Worker("./phiWorker.js", { type: "module", }); async function generateSequence(controller) { const getValue = (id) => document.querySelector(`#${id}`).value; const modelID = getValue("model"); const model = MODELS[modelID]; const weightsURL = model.model instanceof Array ? model.model.map((m) => model.base_url + m) : model.base_url + model.model; const tokenizerURL = model.base_url + model.tokenizer; const configURL = model.base_url + model.config; const prompt = getValue("prompt").trim(); const temperature = getValue("temperature"); const topP = getValue("top-p"); const repeatPenalty = getValue("repeat_penalty"); const seed = getValue("seed"); const maxSeqLen = getValue("max-seq"); function updateStatus(data) { const outStatus = document.querySelector("#output-status"); const outGen = document.querySelector("#output-generation"); const outCounter = document.querySelector("#output-counter"); switch (data.status) { case "loading": outStatus.hidden = false; outStatus.textContent = data.message; outGen.hidden = true; outCounter.hidden = true; break; case "generating": const { message, prompt, sentence, tokensSec, totalTime } = data; outStatus.hidden = true; outCounter.hidden = false; outGen.hidden = false; outGen.innerHTML = snarkdown(prompt + sentence); outCounter.innerHTML = `${(totalTime / 1000).toFixed( 2 )}s (${tokensSec.toFixed(2)} tok/s)`; hljs.highlightAll(); break; case "complete": outStatus.hidden = true; outGen.hidden = false; break; } } return new Promise((resolve, reject) => { phiWorker.postMessage({ weightsURL, modelID, tokenizerURL, configURL, quantized: model.quantized, prompt, temp: temperature, top_p: topP, repeatPenalty, seed: seed, maxSeqLen, command: "start", }); const handleAbort = () => { phiWorker.postMessage({ command: "abort" }); }; const handleMessage = (event) => { const { status, error, message, prompt, sentence } = event.data; if (status) updateStatus(event.data); if (error) { phiWorker.removeEventListener("message", handleMessage); reject(new Error(error)); } if (status === "aborted") { phiWorker.removeEventListener("message", handleMessage); resolve(event.data); } if (status === "complete") { phiWorker.removeEventListener("message", handleMessage); resolve(event.data); } }; controller.signal.addEventListener("abort", handleAbort); phiWorker.addEventListener("message", handleMessage); }); } const form = document.querySelector("#form"); const prompt = document.querySelector("#prompt"); const clearBtn = document.querySelector("#clear-btn"); const runBtn = document.querySelector("#run"); const modelSelect = document.querySelector("#model"); const promptTemplates = document.querySelector("#prompt-templates"); let runController = new AbortController(); 
let isRunning = false; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelSelect.appendChild(option); } const query = new URLSearchParams(window.location.search); const modelID = query.get("model"); if (modelID) { modelSelect.value = modelID; } else { modelSelect.value = "phi_1_5_q4k"; } for (const [i, { title, prompt }] of TEMPLATES.entries()) { const div = document.createElement("div"); const input = document.createElement("input"); input.type = "radio"; input.name = "task"; input.id = `templates-${i}`; input.classList.add("font-light", "cursor-pointer"); input.value = prompt; const label = document.createElement("label"); label.htmlFor = `templates-${i}`; label.classList.add("cursor-pointer"); label.innerText = title; div.appendChild(input); div.appendChild(label); promptTemplates.appendChild(div); } }); promptTemplates.addEventListener("change", (e) => { const template = e.target.value; prompt.value = template; prompt.style.height = "auto"; prompt.style.height = prompt.scrollHeight + "px"; runBtn.disabled = false; clearBtn.classList.remove("invisible"); }); modelSelect.addEventListener("change", (e) => { const query = new URLSearchParams(window.location.search); query.set("model", e.target.value); window.history.replaceState( {}, "", `${window.location.pathname}?${query}` ); window.parent.postMessage({ queryString: "?" + query }, "*"); const model = MODELS[e.target.value]; document.querySelector("#max-seq").max = model.seq_len; document.querySelector("#max-seq").nextElementSibling.value = 200; }); form.addEventListener("submit", async (e) => { e.preventDefault(); if (isRunning) { stopRunning(); } else { startRunning(); await generateSequence(runController); stopRunning(); } }); function startRunning() { isRunning = true; runBtn.textContent = "Stop"; } function stopRunning() { runController.abort(); runController = new AbortController(); runBtn.textContent = "Run"; isRunning = false; } clearBtn.addEventListener("click", (e) => { e.preventDefault(); prompt.value = ""; clearBtn.classList.add("invisible"); runBtn.disabled = true; stopRunning(); }); prompt.addEventListener("input", (e) => { runBtn.disabled = false; if (e.target.value.length > 0) { clearBtn.classList.remove("invisible"); } else { clearBtn.classList.add("invisible"); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4 text-gray-800"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle Phi 1.5 / Phi 2.0</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> The <a href="https://huggingface.co/microsoft/phi-1_5" class="link" target="_blank" >Phi-1.5</a > and <a href="https://huggingface.co/microsoft/phi-2" class="link" target="_blank" >Phi-2</a > models achieve state-of-the-art performance with only 1.3 billion and 2.7 billion parameters, compared to larger models with up to 13 billion parameters. Here you can try the quantized versions. Additional prompt examples are available in the <a href="https://arxiv.org/pdf/2309.05463.pdf#page=8" class="link" target="_blank" > technical report </a >.
</p> <p class="max-w-lg"> You can also try <a href="https://huggingface.co/teknium/Puffin-Phi-v2" class="link" target="_blank" >Puffin-Phi V2 </a> quantized version, a fine-tuned version of Phi-1.5 on the <a href="https://huggingface.co/datasets/LDJnr/Puffin" class="link" target="_blank" >Puffin dataset </a> </p> </div> <div> <p class="text-xs italic max-w-lg"> <b>Note:</b> When first run, the app will download and cache the model, which could take a few minutes. The models are <b>~800MB</b> or <b>~1.57GB</b> in size. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light" ></select> </div> <div> <details> <summary class="font-medium cursor-pointer">Prompt Templates</summary> <form id="prompt-templates" class="grid grid-cols-1 sm:grid-cols-2 gap-1 my-2" ></form> </details> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center" > <input type="submit" hidden /> <textarea type="text" id="prompt" class="font-light text-lg w-full px-3 py-2 mx-1 resize-none outline-none" oninput="this.style.height = 0;this.style.height = this.scrollHeight + 'px'" placeholder="Add your prompt here..." > Instruct: Write a detailed analogy between mathematics and a lighthouse. Output:</textarea > <button id="clear-btn"> <svg fill="none" xmlns="http://www.w3.org/2000/svg" width="40" viewBox="0 0 70 40" > <path opacity=".5" d="M39 .2v40.2" stroke="#1F2937" /> <path d="M1.5 11.5 19 29.1m0-17.6L1.5 29.1" opacity=".5" stroke="#1F2937" stroke-width="2" /> </svg> </button> <button id="run" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed" > Run </button> </form> <details> <summary class="font-medium cursor-pointer">Advanced Options</summary> <div class="grid grid-cols-3 max-w-md items-center gap-3 py-3"> <label class="text-sm font-medium" for="max-seq" >Maximum length </label> <input type="range" id="max-seq" name="max-seq" min="1" max="2048" step="1" value="200" oninput="this.nextElementSibling.value = Number(this.value)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 200</output > <label class="text-sm font-medium" for="temperature" >Temperature</label > <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 0.00</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" > 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button 
id="run" onclick="document.querySelector('#seed').value = Math.floor(Math.random() * Number.MAX_SAFE_INTEGER)" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm" > Rand </button> </div> </details> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2" > <div id="output-counter" hidden class="ml-auto font-semibold grid-rows-1" ></div> <p hidden id="output-generation" class="grid-rows-2 text-lg"></p> <span id="output-status" class="m-auto font-light" >No output yet</span > </div> </div> </main> </body> </html>
candle/candle-wasm-examples/phi/index.html/0
{ "file_path": "candle/candle-wasm-examples/phi/index.html", "repo_id": "candle", "token_count": 9818 }
48
<!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <title>Candle T5</title> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <style type="text/tailwindcss"> .link { @apply underline hover:text-blue-500 hover:no-underline; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> import { getModelInfo, MODELS, extractEmbeddings, generateText, } from "./utils.js"; const t5ModelEncoderWorker = new Worker("./T5ModelEncoderWorker.js", { type: "module", }); const t5ModelConditionalGeneration = new Worker( "./T5ModelConditionalGeneration.js", { type: "module" } ); const formEl = document.querySelector("#form"); const modelEl = document.querySelector("#model"); const promptEl = document.querySelector("#prompt"); const temperatureEl = document.querySelector("#temperature"); const toppEL = document.querySelector("#top-p"); const repeatPenaltyEl = document.querySelector("#repeat_penalty"); const seedEl = document.querySelector("#seed"); const outputEl = document.querySelector("#output-generation"); const tasksEl = document.querySelector("#tasks"); let selectedTaskID = ""; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelEl.appendChild(option); } populateTasks(modelEl.value); modelEl.addEventListener("change", (e) => { populateTasks(e.target.value); }); tasksEl.addEventListener("change", (e) => { const task = e.target.value; const modelID = modelEl.value; promptEl.value = MODELS[modelID].tasks[task].prefix; selectedTaskID = task; }); }); function populateTasks(modelID) { const tasks = MODELS[modelID].tasks; tasksEl.innerHTML = ""; for (const [task, params] of Object.entries(tasks)) { const div = document.createElement("div"); div.innerHTML = ` <input type="radio" name="task" id="${task}" class="font-light cursor-pointer" value="${task}" /> <label for="${task}" class="cursor-pointer"> ${params.prefix} </label> `; tasksEl.appendChild(div); } selectedTaskID = Object.keys(tasks)[0]; tasksEl.querySelector(`#${selectedTaskID}`).checked = true; } form.addEventListener("submit", (e) => { e.preventDefault(); const promptText = promptEl.value; const modelID = modelEl.value; const { modelURL, configURL, tokenizerURL, maxLength } = getModelInfo( modelID, selectedTaskID ); const params = { temperature: Number(temperatureEl.value), top_p: Number(toppEL.value), repetition_penalty: Number(repeatPenaltyEl.value), seed: BigInt(seedEl.value), max_length: maxLength, }; generateText( t5ModelConditionalGeneration, modelURL, tokenizerURL, configURL, modelID, promptText, params, (status) => { if (status.status === "loading") { outputEl.innerText = "Loading model..."; } if (status.status === "decoding") { outputEl.innerText = "Generating..."; } } ).then(({ output }) => { outputEl.innerText = output.generation; }); }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle T5 Transformer</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> This demo showcases Text-To-Text Transfer Transformer (<a href="https://blog.research.google/2020/02/exploring-transfer-learning-with-t5.html" target="_blank" class="link" >T5</a >) models right in your browser, thanks to the <a href="https://github.com/huggingface/candle/" target="_blank" class="link"> Candle </a> ML framework and Rust/WASM. You can choose from a range of available models, including <a href="https://huggingface.co/t5-small" target="_blank" class="link"> t5-small</a >, <a href="https://huggingface.co/t5-base" target="_blank" class="link" >t5-base</a >, <a href="https://huggingface.co/google/flan-t5-small" target="_blank" class="link" >flan-t5-small</a >, several <a href="https://huggingface.co/lmz/candle-quantized-t5/tree/main" target="_blank" class="link"> t5 quantized gguf models</a >, and also a quantized <a href="https://huggingface.co/jbochi/candle-coedit-quantized/tree/main" target="_blank" class="link"> CoEdIT model for text rewrite</a >. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"></select> </div> <div> <h3 class="font-medium">Task Prefix:</h3> <form id="tasks" class="flex flex-col gap-1 my-2"></form> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> <input type="submit" hidden /> <input type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none" placeholder="Add prompt here, e.g. 'translate English to German: Today I'm going to eat Ice Cream'" value="translate English to German: Today I'm going to eat Ice Cream" /> <button class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <div class="grid grid-cols-3 max-w-md items-center gap-3"> <label class="text-sm font-medium" for="temperature">Temperature</label> <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.00</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2 text-lg"> <p id="output-generation" class="grid-rows-2">No output yet</p> </div> </div> </main> </body> </html>
candle/candle-wasm-examples/t5/index.html/0
{ "file_path": "candle/candle-wasm-examples/t5/index.html", "repo_id": "candle", "token_count": 4724 }
49
pub const LANGUAGES: [(&str, &str); 99] = [ ("en", "english"), ("zh", "chinese"), ("de", "german"), ("es", "spanish"), ("ru", "russian"), ("ko", "korean"), ("fr", "french"), ("ja", "japanese"), ("pt", "portuguese"), ("tr", "turkish"), ("pl", "polish"), ("ca", "catalan"), ("nl", "dutch"), ("ar", "arabic"), ("sv", "swedish"), ("it", "italian"), ("id", "indonesian"), ("hi", "hindi"), ("fi", "finnish"), ("vi", "vietnamese"), ("he", "hebrew"), ("uk", "ukrainian"), ("el", "greek"), ("ms", "malay"), ("cs", "czech"), ("ro", "romanian"), ("da", "danish"), ("hu", "hungarian"), ("ta", "tamil"), ("no", "norwegian"), ("th", "thai"), ("ur", "urdu"), ("hr", "croatian"), ("bg", "bulgarian"), ("lt", "lithuanian"), ("la", "latin"), ("mi", "maori"), ("ml", "malayalam"), ("cy", "welsh"), ("sk", "slovak"), ("te", "telugu"), ("fa", "persian"), ("lv", "latvian"), ("bn", "bengali"), ("sr", "serbian"), ("az", "azerbaijani"), ("sl", "slovenian"), ("kn", "kannada"), ("et", "estonian"), ("mk", "macedonian"), ("br", "breton"), ("eu", "basque"), ("is", "icelandic"), ("hy", "armenian"), ("ne", "nepali"), ("mn", "mongolian"), ("bs", "bosnian"), ("kk", "kazakh"), ("sq", "albanian"), ("sw", "swahili"), ("gl", "galician"), ("mr", "marathi"), ("pa", "punjabi"), ("si", "sinhala"), ("km", "khmer"), ("sn", "shona"), ("yo", "yoruba"), ("so", "somali"), ("af", "afrikaans"), ("oc", "occitan"), ("ka", "georgian"), ("be", "belarusian"), ("tg", "tajik"), ("sd", "sindhi"), ("gu", "gujarati"), ("am", "amharic"), ("yi", "yiddish"), ("lo", "lao"), ("uz", "uzbek"), ("fo", "faroese"), ("ht", "haitian creole"), ("ps", "pashto"), ("tk", "turkmen"), ("nn", "nynorsk"), ("mt", "maltese"), ("sa", "sanskrit"), ("lb", "luxembourgish"), ("my", "myanmar"), ("bo", "tibetan"), ("tl", "tagalog"), ("mg", "malagasy"), ("as", "assamese"), ("tt", "tatar"), ("haw", "hawaiian"), ("ln", "lingala"), ("ha", "hausa"), ("ba", "bashkir"), ("jw", "javanese"), ("su", "sundanese"), ];
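// --- Added example (not part of the original file) ---
// A tiny helper sketch showing how the table above can be queried: map a
// Whisper language token such as "fr" to its human-readable name.
pub fn language_name(code: &str) -> Option<&'static str> {
    LANGUAGES
        .iter()
        .find(|(token, _)| *token == code)
        .map(|(_, name)| *name)
}
// language_name("fr") == Some("french"); language_name("xx") == None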
candle/candle-wasm-examples/whisper/src/languages.rs/0
{ "file_path": "candle/candle-wasm-examples/whisper/src/languages.rs", "repo_id": "candle", "token_count": 1175 }
50
use crate::model::{report_detect, report_pose, Bbox, Multiples, YoloV8, YoloV8Pose}; use candle::{DType, Device, Result, Tensor}; use candle_nn::{Module, VarBuilder}; use serde::{Deserialize, Serialize}; use wasm_bindgen::prelude::*; use yew_agent::{HandlerId, Public, WorkerLink}; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string())) } // Communication to the worker happens through bincode, the model weights and configs are fetched // on the main thread and transferred via the following structure. #[derive(Serialize, Deserialize)] pub struct ModelData { pub weights: Vec<u8>, pub model_size: String, } #[derive(Serialize, Deserialize)] pub struct RunData { pub image_data: Vec<u8>, pub conf_threshold: f32, pub iou_threshold: f32, } pub struct Model { model: YoloV8, } impl Model { pub fn run( &self, image_data: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<Vec<Vec<Bbox>>> { console_log!("image data: {}", image_data.len()); let image_data = std::io::Cursor::new(image_data); let original_image = image::ImageReader::new(image_data) .with_guessed_format()? .decode() .map_err(candle::Error::wrap)?; let (width, height) = { let w = original_image.width() as usize; let h = original_image.height() as usize; if w < h { let w = w * 640 / h; // Sizes have to be divisible by 32. (w / 32 * 32, 640) } else { let h = h * 640 / w; (640, h / 32 * 32) } }; let image_t = { let img = original_image.resize_exact( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &Device::Cpu, )? .permute((2, 0, 1))? }; let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = self.model.forward(&image_t)?.squeeze(0)?; console_log!("generated predictions {predictions:?}"); let bboxes = report_detect( &predictions, original_image, width, height, conf_threshold, iou_threshold, )?; Ok(bboxes) } pub fn load_(weights: Vec<u8>, model_size: &str) -> Result<Self> { let multiples = match model_size { "n" => Multiples::n(), "s" => Multiples::s(), "m" => Multiples::m(), "l" => Multiples::l(), "x" => Multiples::x(), _ => Err(candle::Error::Msg( "invalid model size: must be n, s, m, l or x".to_string(), ))?, }; let dev = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?; let model = YoloV8::load(vb, multiples, 80)?; Ok(Self { model }) } pub fn load(md: ModelData) -> Result<Self> { Self::load_(md.weights, &md.model_size.to_string()) } } pub struct ModelPose { model: YoloV8Pose, } impl ModelPose { pub fn run( &self, image_data: Vec<u8>, conf_threshold: f32, iou_threshold: f32, ) -> Result<Vec<Bbox>> { console_log!("image data: {}", image_data.len()); let image_data = std::io::Cursor::new(image_data); let original_image = image::ImageReader::new(image_data) .with_guessed_format()? .decode() .map_err(candle::Error::wrap)?; let (width, height) = { let w = original_image.width() as usize; let h = original_image.height() as usize; if w < h { let w = w * 640 / h; // Sizes have to be divisible by 32. 
(w / 32 * 32, 640) } else { let h = h * 640 / w; (640, h / 32 * 32) } }; let image_t = { let img = original_image.resize_exact( width as u32, height as u32, image::imageops::FilterType::CatmullRom, ); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &Device::Cpu, )? .permute((2, 0, 1))? }; let image_t = (image_t.unsqueeze(0)?.to_dtype(DType::F32)? * (1. / 255.))?; let predictions = self.model.forward(&image_t)?.squeeze(0)?; console_log!("generated predictions {predictions:?}"); let bboxes = report_pose( &predictions, original_image, width, height, conf_threshold, iou_threshold, )?; Ok(bboxes) } pub fn load_(weights: Vec<u8>, model_size: &str) -> Result<Self> { let multiples = match model_size { "n" => Multiples::n(), "s" => Multiples::s(), "m" => Multiples::m(), "l" => Multiples::l(), "x" => Multiples::x(), _ => Err(candle::Error::Msg( "invalid model size: must be n, s, m, l or x".to_string(), ))?, }; let dev = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?; let model = YoloV8Pose::load(vb, multiples, 1, (17, 3))?; Ok(Self { model }) } pub fn load(md: ModelData) -> Result<Self> { Self::load_(md.weights, &md.model_size.to_string()) } } pub struct Worker { link: WorkerLink<Self>, model: Option<Model>, } #[derive(Serialize, Deserialize)] pub enum WorkerInput { ModelData(ModelData), RunData(RunData), } #[derive(Serialize, Deserialize)] pub enum WorkerOutput { ProcessingDone(std::result::Result<Vec<Vec<Bbox>>, String>), WeightsLoaded, } impl yew_agent::Worker for Worker { type Input = WorkerInput; type Message = (); type Output = std::result::Result<WorkerOutput, String>; type Reach = Public<Self>; fn create(link: WorkerLink<Self>) -> Self { Self { link, model: None } } fn update(&mut self, _msg: Self::Message) { // no messaging } fn handle_input(&mut self, msg: Self::Input, id: HandlerId) { let output = match msg { WorkerInput::ModelData(md) => match Model::load(md) { Ok(model) => { self.model = Some(model); Ok(WorkerOutput::WeightsLoaded) } Err(err) => Err(format!("model creation error {err:?}")), }, WorkerInput::RunData(rd) => match &mut self.model { None => Err("model has not been set yet".to_string()), Some(model) => { let result = model .run(rd.image_data, rd.conf_threshold, rd.iou_threshold) .map_err(|e| e.to_string()); Ok(WorkerOutput::ProcessingDone(result)) } }, }; self.link.respond(id, output); } fn name_of_resource() -> &'static str { "worker.js" } fn resource_path_is_relative() -> bool { true } }
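// --- Added example (not part of the original file) ---
// A hedged usage sketch: outside of the yew worker, the detection model can be
// driven directly. The safetensors bytes and the encoded image bytes are assumed
// to have been fetched elsewhere; the thresholds are typical YOLO defaults.
fn detect_example(weights: Vec<u8>, image: Vec<u8>) -> Result<Vec<Vec<Bbox>>> {
    // "n" selects the nano multiples; "s", "m", "l" and "x" are also accepted.
    let model = Model::load_(weights, "n")?;
    // confidence threshold 0.25, iou threshold 0.45
    model.run(image, 0.25, 0.45)
}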
candle/candle-wasm-examples/yolo/src/worker.rs/0
{ "file_path": "candle/candle-wasm-examples/yolo/src/worker.rs", "repo_id": "candle", "token_count": 4075 }
51
--- title: chat-ui emoji: ๐Ÿ”ฅ colorFrom: purple colorTo: purple sdk: docker pinned: false license: apache-2.0 base_path: /chat app_port: 3000 failure_strategy: rollback load_balancing_strategy: random --- # Chat UI **Find the docs at [hf.co/docs/chat-ui](https://huggingface.co/docs/chat-ui/index).** ![Chat UI repository thumbnail](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chatui-websearch.png) A chat interface using open source models, eg OpenAssistant or Llama. It is a SvelteKit app and it powers the [HuggingChat app on hf.co/chat](https://huggingface.co/chat). 0. [Quickstart](#quickstart) 1. [No Setup Deploy](#no-setup-deploy) 2. [Setup](#setup) 3. [Launch](#launch) 4. [Web Search](#web-search) 5. [Text Embedding Models](#text-embedding-models) 6. [Extra parameters](#extra-parameters) 7. [Common issues](#common-issues) 8. [Deploying to a HF Space](#deploying-to-a-hf-space) 9. [Building](#building) ## Quickstart You can quickly start a locally running chat-ui & LLM text-generation server thanks to chat-ui's [llama.cpp server support](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp). **Step 1 (Start llama.cpp server):** Install llama.cpp w/ brew (for Mac): ```bash # install llama.cpp brew install llama.cpp ``` or [build directly from the source](https://github.com/ggerganov/llama.cpp/blob/master/docs/build.md) for your target device: ``` git clone https://github.com/ggerganov/llama.cpp && cd llama.cpp && make ``` Next, start the server with the [LLM of your choice](https://huggingface.co/models?library=gguf): ```bash # start llama.cpp server (using hf.co/microsoft/Phi-3-mini-4k-instruct-gguf as an example) llama-server --hf-repo microsoft/Phi-3-mini-4k-instruct-gguf --hf-file Phi-3-mini-4k-instruct-q4.gguf -c 4096 ``` A local LLaMA.cpp HTTP Server will start on `http://localhost:8080`. Read more [here](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp). **Step 2 (tell chat-ui to use local llama.cpp server):** Add the following to your `.env.local`: ```ini MODELS=`[ { "name": "Local microsoft/Phi-3-mini-4k-instruct-gguf", "tokenizer": "microsoft/Phi-3-mini-4k-instruct-gguf", "preprompt": "", "chatPromptTemplate": "<s>{{preprompt}}{{#each messages}}{{#ifUser}}<|user|>\n{{content}}<|end|>\n<|assistant|>\n{{/ifUser}}{{#ifAssistant}}{{content}}<|end|>\n{{/ifAssistant}}{{/each}}", "parameters": { "stop": ["<|end|>", "<|endoftext|>", "<|assistant|>"], "temperature": 0.7, "max_new_tokens": 1024, "truncate": 3071 }, "endpoints": [{ "type" : "llamacpp", "baseURL": "http://localhost:8080" }], }, ]` ``` Read more [here](https://huggingface.co/docs/chat-ui/configuration/models/providers/llamacpp). **Step 3 (make sure you have MongoDb running locally):** ```bash docker run -d -p 27017:27017 --name mongo-chatui mongo:latest ``` Read more [here](#database). **Step 4 (start chat-ui):** ```bash git clone https://github.com/huggingface/chat-ui cd chat-ui npm install npm run dev -- --open ``` Read more [here](#launch). <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/chat-ui/llamacpp-dark.png" height="auto"/> ## No Setup Deploy If you don't want to configure, setup, and launch your own Chat UI yourself, you can use this option as a fast deploy alternative. 
You can deploy your own customized Chat UI instance with any supported [LLM](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending) of your choice on [Hugging Face Spaces](https://huggingface.co/spaces). To do so, use the chat-ui template [available here](https://huggingface.co/new-space?template=huggingchat/chat-ui-template). Set `HF_TOKEN` in [Space secrets](https://huggingface.co/docs/hub/spaces-overview#managing-secrets-and-environment-variables) to deploy a model with gated access or a model in a private repository. It's also compatible with the [Inference for PROs](https://huggingface.co/blog/inference-pro) curated list of powerful models with higher rate limits. Make sure to create your personal token first in your [User Access Tokens settings](https://huggingface.co/settings/tokens). Read the full tutorial [here](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui#chatui-on-spaces). ## Setup The default config for Chat UI is stored in the `.env` file. You will need to override some values to get Chat UI to run locally. This is done in `.env.local`. Start by creating a `.env.local` file in the root of the repository. The bare minimum config you need to get Chat UI to run locally is the following: ```env MONGODB_URL=<the URL to your MongoDB instance> HF_TOKEN=<your access token> ``` ### Database The chat history is stored in a MongoDB instance, and having a DB instance available is needed for Chat UI to work. You can use a local MongoDB instance. The easiest way is to spin one up using docker: ```bash docker run -d -p 27017:27017 --name mongo-chatui mongo:latest ``` In that case, the URL of your DB will be `MONGODB_URL=mongodb://localhost:27017`. Alternatively, you can use a [free MongoDB Atlas](https://www.mongodb.com/pricing) instance for this; Chat UI should fit comfortably within their free tier. You can then set the `MONGODB_URL` variable in `.env.local` to match your instance. ### Hugging Face Access Token If you use a remote inference endpoint, you will need a Hugging Face access token to run Chat UI locally. You can get one from [your Hugging Face profile](https://huggingface.co/settings/tokens). ## Launch After you're done with the `.env.local` file you can run Chat UI locally with: ```bash npm install npm run dev ``` ## Web Search Chat UI comes with a powerful Web Search feature. It works by: 1. Generating an appropriate search query from the user prompt. 2. Performing web search and extracting content from webpages. 3. Creating embeddings from texts using a text embedding model. 4. Finding, from these embeddings, the ones that are closest to the user query using a vector similarity search. Specifically, we use `inner product` distance. 5. Getting the texts corresponding to those closest embeddings and performing [Retrieval-Augmented Generation](https://huggingface.co/papers/2005.11401) (i.e. expanding the user prompt by adding those texts so that an LLM can use this information). ## Text Embedding Models By default (for backward compatibility), when the `TEXT_EMBEDDING_MODELS` environment variable is not defined, [transformers.js](https://huggingface.co/docs/transformers.js) embedding models will be used for embedding tasks, specifically the [Xenova/gte-small](https://huggingface.co/Xenova/gte-small) model. You can customize the embedding model by setting `TEXT_EMBEDDING_MODELS` in your `.env.local` file.
For example: ```env TEXT_EMBEDDING_MODELS = `[ { "name": "Xenova/gte-small", "displayName": "Xenova/gte-small", "description": "locally running embedding", "chunkCharLength": 512, "endpoints": [ {"type": "transformersjs"} ] }, { "name": "intfloat/e5-base-v2", "displayName": "intfloat/e5-base-v2", "description": "hosted embedding model", "chunkCharLength": 768, "preQuery": "query: ", # See https://huggingface.co/intfloat/e5-base-v2#faq "prePassage": "passage: ", # See https://huggingface.co/intfloat/e5-base-v2#faq "endpoints": [ { "type": "tei", "url": "http://127.0.0.1:8080/", "authorization": "TOKEN_TYPE TOKEN" // optional authorization field. Example: "Basic VVNFUjpQQVNT" } ] } ]` ``` The required fields are `name`, `chunkCharLength` and `endpoints`. Supported text embedding backends are: [`transformers.js`](https://huggingface.co/docs/transformers.js), [`TEI`](https://github.com/huggingface/text-embeddings-inference) and [`OpenAI`](https://platform.openai.com/docs/guides/embeddings). `transformers.js` models run locally as part of `chat-ui`, whereas `TEI` models run in a different environment and are accessed through an API endpoint. `openai` models are accessed through the [OpenAI API](https://platform.openai.com/docs/guides/embeddings). When more than one embedding model is supplied in the `.env.local` file, the first one is used by default, and the others are only used by LLMs whose `embeddingModel` is configured to the name of that model. ## Extra parameters ### OpenID connect The login feature is disabled by default and users are attributed a unique ID based on their browser. But if you want to use OpenID to authenticate your users, you can add the following to your `.env.local` file: ```env OPENID_CONFIG=`{ PROVIDER_URL: "<your OIDC issuer>", CLIENT_ID: "<your OIDC client ID>", CLIENT_SECRET: "<your OIDC client secret>", SCOPES: "openid profile", TOLERANCE: // optional RESOURCE: // optional }` ``` These variables will enable the OpenID sign-in modal for users. ### Trusted header authentication You can set the env variable `TRUSTED_EMAIL_HEADER` to point to the header that contains the user's email address. This will allow you to authenticate users from the header. This setup is usually combined with a proxy that will be in front of chat-ui and will handle the auth and set the header. > [!WARNING] > Make sure to only allow requests to chat-ui through your proxy which handles authentication, otherwise users could authenticate as anyone by setting the header manually! Only set this up if you understand the implications and know how to do it correctly. Here is a list of header names for common auth providers: - Tailscale Serve: `Tailscale-User-Login` - Cloudflare Access: `Cf-Access-Authenticated-User-Email` - oauth2-proxy: `X-Forwarded-Email` ### Theming You can use a few environment variables to customize the look and feel of chat-ui. These are by default: ```env PUBLIC_APP_NAME=ChatUI PUBLIC_APP_ASSETS=chatui PUBLIC_APP_COLOR=blue PUBLIC_APP_DESCRIPTION="Making the community's best AI chat models available to everyone." PUBLIC_APP_DATA_SHARING= PUBLIC_APP_DISCLAIMER= ``` - `PUBLIC_APP_NAME` The name used as a title throughout the app. - `PUBLIC_APP_ASSETS` Is used to find logos & favicons in `static/$PUBLIC_APP_ASSETS`, current options are `chatui` and `huggingchat`. - `PUBLIC_APP_COLOR` Can be any of the [tailwind colors](https://tailwindcss.com/docs/customizing-colors#default-color-palette).
- `PUBLIC_APP_DATA_SHARING` Can be set to 1 to add a toggle in the user settings that lets your users opt-in to data sharing with model creators. - `PUBLIC_APP_DISCLAIMER` If set to 1, we show a disclaimer about generated outputs on login. ### Web Search config You can enable the web search through an API by adding `YDC_API_KEY` ([docs.you.com](https://docs.you.com)) or `SERPER_API_KEY` ([serper.dev](https://serper.dev/)) or `SERPAPI_KEY` ([serpapi.com](https://serpapi.com/)) or `SERPSTACK_API_KEY` ([serpstack.com](https://serpstack.com/)) or `SEARCHAPI_KEY` ([searchapi.io](https://www.searchapi.io/)) to your `.env.local`. You can also simply enable the local Google web search by setting `USE_LOCAL_WEBSEARCH=true` in your `.env.local` or specify a SearXNG instance by adding the query URL to `SEARXNG_QUERY_URL`. You can enable JavaScript when parsing webpages to improve compatibility with `WEBSEARCH_JAVASCRIPT=true` at the cost of increased CPU usage. You'll want at least 4 cores when enabling it. ### Custom models You can customize the parameters passed to the model or even use a new model by updating the `MODELS` variable in your `.env.local`. The default one can be found in `.env` and looks like this: ```env MODELS=`[ { "name": "mistralai/Mistral-7B-Instruct-v0.2", "displayName": "mistralai/Mistral-7B-Instruct-v0.2", "description": "Mistral 7B is a new Apache 2.0 model, released by Mistral AI that outperforms Llama2 13B in benchmarks.", "websiteUrl": "https://mistral.ai/news/announcing-mistral-7b/", "preprompt": "", "chatPromptTemplate" : "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}}{{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s>{{/ifAssistant}}{{/each}}", "parameters": { "temperature": 0.3, "top_p": 0.95, "repetition_penalty": 1.2, "top_k": 50, "truncate": 3072, "max_new_tokens": 1024, "stop": ["</s>"] }, "promptExamples": [ { "title": "Write an email from bullet list", "prompt": "As a restaurant owner, write a professional email to the supplier to get these products every week: \n\n- Wine (x10)\n- Eggs (x24)\n- Bread (x12)" }, { "title": "Code a snake game", "prompt": "Code a basic snake game in python, give explanations for each step." }, { "title": "Assist in a task", "prompt": "How do I make a delicious lemon cheesecake?" } ] } ]` ``` You can change things like the parameters, or customize the preprompt to better suit your needs. You can also add more models by adding more objects to the array, with different preprompts for example. #### chatPromptTemplate When querying the model for a chat response, the `chatPromptTemplate` template is used. `messages` is an array of chat messages; it has the format `[{ content: string }, ...]`. To identify if a message is a user message or an assistant message, the `ifUser` and `ifAssistant` block helpers can be used. The following is the default `chatPromptTemplate`, although newlines and indentation have been added for readability. You can find the prompts used in production for HuggingChat [here](https://github.com/huggingface/chat-ui/blob/main/PROMPTS.md).
```prompt
{{preprompt}}
{{#each messages}}
  {{#ifUser}}{{@root.userMessageToken}}{{content}}{{@root.userMessageEndToken}}{{/ifUser}}
  {{#ifAssistant}}{{@root.assistantMessageToken}}{{content}}{{@root.assistantMessageEndToken}}{{/ifAssistant}}
{{/each}}
{{assistantMessageToken}}
```

#### Multimodal model

We currently support [IDEFICS](https://huggingface.co/blog/idefics) (hosted on TGI), OpenAI and Claude 3 as multimodal models. You can enable multimodal support by setting `multimodal: true` in your `MODELS` configuration. For IDEFICS, you must have a [PRO HF API token](https://huggingface.co/settings/tokens). For OpenAI, see the [OpenAI section](#OpenAI). For Anthropic, see the [Anthropic section](#anthropic).

```env
{
  "name": "HuggingFaceM4/idefics-80b-instruct",
  "multimodal" : true,
  "description": "IDEFICS is the new multimodal model by Hugging Face.",
  "preprompt": "",
  "chatPromptTemplate" : "{{#each messages}}{{#ifUser}}User: {{content}}{{/ifUser}}<end_of_utterance>\nAssistant: {{#ifAssistant}}{{content}}\n{{/ifAssistant}}{{/each}}",
  "parameters": {
    "temperature": 0.1,
    "top_p": 0.95,
    "repetition_penalty": 1.2,
    "top_k": 12,
    "truncate": 1000,
    "max_new_tokens": 1024,
    "stop": ["<end_of_utterance>", "User:", "\nUser:"]
  }
}
```

#### Running your own models using a custom endpoint

If you want to, instead of hitting models on the Hugging Face Inference API, you can run your own models locally.

A good option is to hit a [text-generation-inference](https://github.com/huggingface/text-generation-inference) endpoint. This is what is done in the official [Chat UI Spaces Docker template](https://huggingface.co/new-space?template=huggingchat/chat-ui-template) for instance: both this app and a text-generation-inference server run inside the same container.

To do this, you can add your own endpoints to the `MODELS` variable in `.env.local`, by adding an `"endpoints"` key for each model in `MODELS`.

```env
{
  // rest of the model config here
  "endpoints": [{
    "type" : "tgi",
    "url": "https://HOST:PORT",
  }]
}
```

If `endpoints` is left unspecified, ChatUI will look for the model on the hosted Hugging Face inference API using the model name.

##### OpenAI API compatible models

Chat UI can be used with any API server that supports OpenAI API compatibility, for example [text-generation-webui](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai), [LocalAI](https://github.com/go-skynet/LocalAI), [FastChat](https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md), [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), [ialacol](https://github.com/chenhunghan/ialacol) and [vllm](https://docs.vllm.ai/en/latest/serving/openai_compatible_server.html).

The following example config makes Chat UI work with [text-generation-webui](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai): `endpoint.baseUrl` is the URL of the OpenAI-API-compatible server and overrides the base URL used by the OpenAI instance. `endpoint.completion` determines which endpoint is used; the default is `chat_completions`, which uses `v1/chat/completions`. Set `endpoint.completion` to `completions` to use the `v1/completions` endpoint instead.

Parameters not supported by OpenAI (e.g. `top_k`, `repetition_penalty`) must be set in the `extraBody` of the endpoint config. Be aware that setting them in `parameters` will cause them to be omitted.
```
MODELS=`[
  {
    "name": "text-generation-webui",
    "id": "text-generation-webui",
    "parameters": {
      "temperature": 0.9,
      "top_p": 0.95,
      "max_new_tokens": 1024,
      "stop": []
    },
    "endpoints": [{
      "type" : "openai",
      "baseURL": "http://localhost:8000/v1",
      "extraBody": {
        "repetition_penalty": 1.2,
        "top_k": 50,
        "truncate": 1000
      }
    }]
  }
]`
```

The `openai` type includes official OpenAI models. You can add, for example, GPT-4/GPT-3.5 as an "openai" model:

```
OPENAI_API_KEY=#your openai api key here
MODELS=`[{
  "name": "gpt-4",
  "displayName": "GPT 4",
  "endpoints" : [{
    "type": "openai"
  }]
}, {
  "name": "gpt-3.5-turbo",
  "displayName": "GPT 3.5 Turbo",
  "endpoints" : [{
    "type": "openai"
  }]
}]`
```

You may also consume any model provider that exposes a compatible OpenAI API endpoint. For example, you may self-host the [Portkey](https://github.com/Portkey-AI/gateway) gateway and experiment with Claude or GPTs offered by Azure OpenAI.

Example for Claude from Anthropic:

```
MODELS=`[{
  "name": "claude-2.1",
  "displayName": "Claude 2.1",
  "description": "Anthropic has been founded by former OpenAI researchers...",
  "parameters": {
    "temperature": 0.5,
    "max_new_tokens": 4096,
  },
  "endpoints": [
    {
      "type": "openai",
      "baseURL": "https://gateway.example.com/v1",
      "defaultHeaders": {
        "x-portkey-config": '{"provider":"anthropic","api_key":"sk-ant-abc...xyz"}'
      }
    }
  ]
}]`
```

Example for GPT 4 deployed on Azure OpenAI:

```
MODELS=`[{
  "id": "gpt-4-1106-preview",
  "name": "gpt-4-1106-preview",
  "displayName": "gpt-4-1106-preview",
  "parameters": {
    "temperature": 0.5,
    "max_new_tokens": 4096,
  },
  "endpoints": [
    {
      "type": "openai",
      "baseURL": "https://{resource-name}.openai.azure.com/openai/deployments/{deployment-id}",
      "defaultHeaders": { "api-key": "{api-key}" },
      "defaultQuery": { "api-version": "2023-05-15" }
    }
  ]
}]`
```

Or try Mistral from [Deepinfra](https://deepinfra.com/mistralai/Mistral-7B-Instruct-v0.1/api?example=openai-http):

> Note: `apiKey` can either be set per endpoint, or globally using the `OPENAI_API_KEY` variable.

```
MODELS=`[{
  "name": "mistral-7b",
  "displayName": "Mistral 7B",
  "description": "A 7B dense Transformer, fast-deployed and easily customisable. Small, yet powerful for a variety of use cases. Supports English and code, and an 8k context window.",
  "parameters": {
    "temperature": 0.5,
    "max_new_tokens": 4096,
  },
  "endpoints": [
    {
      "type": "openai",
      "baseURL": "https://api.deepinfra.com/v1/openai",
      "apiKey": "abc...xyz"
    }
  ]
}]`
```

##### Llama.cpp API server

chat-ui also supports the llama.cpp API server directly, without the need for an adapter. You can do this using the `llamacpp` endpoint type.

If you want to run Chat UI with llama.cpp, you can do the following, using [microsoft/Phi-3-mini-4k-instruct-gguf](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) as an example model:

```bash
# install llama.cpp
brew install llama.cpp
# start llama.cpp server
llama-server --hf-repo microsoft/Phi-3-mini-4k-instruct-gguf --hf-file Phi-3-mini-4k-instruct-q4.gguf -c 4096
```

```env
MODELS=`[
  {
    "name": "Local Zephyr",
    "chatPromptTemplate": "<|system|>\n{{preprompt}}</s>\n{{#each messages}}{{#ifUser}}<|user|>\n{{content}}</s>\n<|assistant|>\n{{/ifUser}}{{#ifAssistant}}{{content}}</s>\n{{/ifAssistant}}{{/each}}",
    "parameters": {
      "temperature": 0.1,
      "top_p": 0.95,
      "repetition_penalty": 1.2,
      "top_k": 50,
      "truncate": 1000,
      "max_new_tokens": 2048,
      "stop": ["</s>"]
    },
    "endpoints": [
      {
        "url": "http://127.0.0.1:8080",
        "type": "llamacpp"
      }
    ]
  }
]`
```

Start chat-ui with `npm run dev` and you should be able to chat with the model locally.
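If chat-ui can't reach the server, you can first sanity-check that llama.cpp is up. This is just a sketch: it assumes the default port from the command above, and that your llama.cpp build exposes the `/health` route (recent builds do, but verify against your version):

```bash
# should return a small JSON status object once the model has loaded
curl http://127.0.0.1:8080/health
```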
#### Ollama

We also support the Ollama inference server. Spin up a model with:

```bash
ollama run mistral
```

Then specify the endpoints like so:

```env
MODELS=`[
  {
    "name": "Ollama Mistral",
    "chatPromptTemplate": "<s>{{#each messages}}{{#ifUser}}[INST] {{#if @first}}{{#if @root.preprompt}}{{@root.preprompt}}\n{{/if}}{{/if}} {{content}} [/INST]{{/ifUser}}{{#ifAssistant}}{{content}}</s> {{/ifAssistant}}{{/each}}",
    "parameters": {
      "temperature": 0.1,
      "top_p": 0.95,
      "repetition_penalty": 1.2,
      "top_k": 50,
      "truncate": 3072,
      "max_new_tokens": 1024,
      "stop": ["</s>"]
    },
    "endpoints": [
      {
        "type": "ollama",
        "url" : "http://127.0.0.1:11434",
        "ollamaName" : "mistral"
      }
    ]
  }
]`
```

#### Anthropic

We also support Anthropic models (including multimodal ones via `multimodal: true`) through the official SDK. You may provide your API key via the `ANTHROPIC_API_KEY` env variable, or alternatively through `endpoints.apiKey`, as in the following example.

```
MODELS=`[
  {
    "name": "claude-3-haiku-20240307",
    "displayName": "Claude 3 Haiku",
    "description": "Fastest and most compact model for near-instant responsiveness",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096,
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-sonnet-20240229",
    "displayName": "Claude 3 Sonnet",
    "description": "Ideal balance of intelligence and speed",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096,
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-opus-20240229",
    "displayName": "Claude 3 Opus",
    "description": "Most powerful model for highly complex tasks",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic",
        // optionals
        "apiKey": "sk-ant-...",
        "baseURL": "https://api.anthropic.com",
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  }
]`
```

We also support using Anthropic models running on Vertex AI. Authentication is done using Google Application Default Credentials. The project ID can be provided through `endpoints.projectId`, as in the following example:

```
MODELS=`[
  {
    "name": "claude-3-sonnet@20240229",
    "displayName": "Claude 3 Sonnet",
    "description": "Ideal balance of intelligence and speed",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096,
    },
    "endpoints": [
      {
        "type": "anthropic-vertex",
        "region": "us-central1",
        "projectId": "gcp-project-id",
        // optionals
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  },
  {
    "name": "claude-3-haiku@20240307",
    "displayName": "Claude 3 Haiku",
    "description": "Fastest, most compact model for near-instant responsiveness",
    "multimodal": true,
    "parameters": {
      "max_new_tokens": 4096
    },
    "endpoints": [
      {
        "type": "anthropic-vertex",
        "region": "us-central1",
        "projectId": "gcp-project-id",
        // optionals
        "defaultHeaders": {},
        "defaultQuery": {}
      }
    ]
  }
]`
```

#### Amazon

You can also specify your Amazon SageMaker instance as an endpoint for chat-ui. The config goes like this:

```env
"endpoints": [
  {
    "type" : "aws",
    "service" : "sagemaker",
    "url": "",
    "accessKey": "",
    "secretKey" : "",
    "sessionToken": "",
    "region": "",
    "weight": 1
  }
]
```

You can also set `"service" : "lambda"` to use a Lambda instance.

You can get the `accessKey` and `secretKey` from your AWS user, under programmatic access.
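For context, here is a sketch of how that endpoint fragment sits inside a complete `MODELS` entry; the model name, parameters, and URL below are placeholders, not values from the chat-ui docs:

```env
MODELS=`[
  {
    "name": "my-sagemaker-model",
    "parameters": { "max_new_tokens": 1024 },
    "endpoints": [
      {
        "type": "aws",
        "service": "sagemaker",
        "url": "https://<endpoint-name>.sagemaker.<region>.amazonaws.com",
        "accessKey": "<accessKey>",
        "secretKey": "<secretKey>",
        "region": "<region>",
        "weight": 1
      }
    ]
  }
]`
```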
#### Cloudflare Workers AI

You can also use Cloudflare Workers AI to run your own models with serverless inference.

You will need to have a Cloudflare account, then get your [account ID](https://developers.cloudflare.com/fundamentals/setup/find-account-and-zone-ids/) as well as your [API token](https://developers.cloudflare.com/workers-ai/get-started/rest-api/#1-get-an-api-token) for Workers AI.

You can either specify them directly in your `.env.local` using the `CLOUDFLARE_ACCOUNT_ID` and `CLOUDFLARE_API_TOKEN` variables, or you can set them directly in the endpoint config.

You can find the list of models available on Cloudflare [here](https://developers.cloudflare.com/workers-ai/models/#text-generation).

```env
{
  "name" : "nousresearch/hermes-2-pro-mistral-7b",
  "tokenizer": "nousresearch/hermes-2-pro-mistral-7b",
  "parameters": {
    "stop": ["<|im_end|>"]
  },
  "endpoints" : [
    {
      "type" : "cloudflare"
      // optionally specify these:
      // "accountId": "your-account-id",
      // "authToken": "your-api-token"
    }
  ]
}
```

#### Cohere

You can also use Cohere to run their models directly from chat-ui. You will need to have a Cohere account, then get your [API token](https://dashboard.cohere.com/api-keys). You can either specify it directly in your `.env.local` using the `COHERE_API_TOKEN` variable, or you can set it in the endpoint config.

Here is an example of a Cohere model config. You can set which model you want to use by setting the `id` field to the model name.

```env
{
  "name" : "CohereForAI/c4ai-command-r-v01",
  "id": "command-r",
  "description": "C4AI Command-R is a research release of a 35 billion parameter highly performant generative model",
  "endpoints": [
    {
      "type": "cohere"
      // optionally specify this, or use COHERE_API_TOKEN:
      // "apiKey": "your-api-token"
    }
  ]
}
```

##### Google Vertex models

Chat UI can connect to the Google Vertex API endpoints ([list of supported models](https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models)).

To enable:

1. [Select](https://console.cloud.google.com/project) or [create](https://cloud.google.com/resource-manager/docs/creating-managing-projects#creating_a_project) a Google Cloud project.
1. [Enable billing for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
1. [Enable the Vertex AI API](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com).
1. [Set up authentication with a service account](https://cloud.google.com/docs/authentication/getting-started) so you can access the API from your local workstation.

The service account credentials file can be imported as an environment variable:

```env
GOOGLE_APPLICATION_CREDENTIALS = clientid.json
```

Make sure your Docker container has access to the file and that the variable is correctly set.

Afterwards, Google Vertex endpoints can be configured as follows:

```
MODELS=`[
  //...
  {
    "name": "gemini-1.5-pro",
    "displayName": "Vertex Gemini Pro 1.5",
    "endpoints" : [{
      "type": "vertex",
      "project": "abc-xyz",
      "location": "europe-west3",
      "model": "gemini-1.5-pro-preview-0409", // model-name
      // Optional
      "safetyThreshold": "BLOCK_MEDIUM_AND_ABOVE",
      "apiEndpoint": "", // alternative api endpoint url,
      // Optional
      "tools": [{
        "googleSearchRetrieval": {
          "disableAttribution": true
        }
      }]
    }]
  },
]`
```

##### LangServe

LangChain applications that are deployed using LangServe can be called with the following config:

```
MODELS=`[
  //...
{ "name": "summarization-chain", //model-name "endpoints" : [{ "type": "langserve", "url" : "http://127.0.0.1:8100", }] }, ]` ``` ### Custom endpoint authorization #### Basic and Bearer Custom endpoints may require authorization, depending on how you configure them. Authentication will usually be set either with `Basic` or `Bearer`. For `Basic` we will need to generate a base64 encoding of the username and password. `echo -n "USER:PASS" | base64` > VVNFUjpQQVNT For `Bearer` you can use a token, which can be grabbed from [here](https://huggingface.co/settings/tokens). You can then add the generated information and the `authorization` parameter to your `.env.local`. ```env "endpoints": [ { "url": "https://HOST:PORT", "authorization": "Basic VVNFUjpQQVNT", } ] ``` Please note that if `HF_TOKEN` is also set or not empty, it will take precedence. #### Models hosted on multiple custom endpoints If the model being hosted will be available on multiple servers/instances add the `weight` parameter to your `.env.local`. The `weight` will be used to determine the probability of requesting a particular endpoint. ```env "endpoints": [ { "url": "https://HOST:PORT", "weight": 1 }, { "url": "https://HOST:PORT", "weight": 2 } ... ] ``` #### Client Certificate Authentication (mTLS) Custom endpoints may require client certificate authentication, depending on how you configure them. To enable mTLS between Chat UI and your custom endpoint, you will need to set the `USE_CLIENT_CERTIFICATE` to `true`, and add the `CERT_PATH` and `KEY_PATH` parameters to your `.env.local`. These parameters should point to the location of the certificate and key files on your local machine. The certificate and key files should be in PEM format. The key file can be encrypted with a passphrase, in which case you will also need to add the `CLIENT_KEY_PASSWORD` parameter to your `.env.local`. If you're using a certificate signed by a private CA, you will also need to add the `CA_PATH` parameter to your `.env.local`. This parameter should point to the location of the CA certificate file on your local machine. If you're using a self-signed certificate, e.g. for testing or development purposes, you can set the `REJECT_UNAUTHORIZED` parameter to `false` in your `.env.local`. This will disable certificate validation, and allow Chat UI to connect to your custom endpoint. #### Specific Embedding Model A model can use any of the embedding models defined in `.env.local`, (currently used when web searching), by default it will use the first embedding model, but it can be changed with the field `embeddingModel`: ```env TEXT_EMBEDDING_MODELS = `[ { "name": "Xenova/gte-small", "chunkCharLength": 512, "endpoints": [ {"type": "transformersjs"} ] }, { "name": "intfloat/e5-base-v2", "chunkCharLength": 768, "endpoints": [ {"type": "tei", "url": "http://127.0.0.1:8080/", "authorization": "Basic VVNFUjpQQVNT"}, {"type": "tei", "url": "http://127.0.0.1:8081/"} ] } ]` MODELS=`[ { "name": "Ollama Mistral", "chatPromptTemplate": "...", "embeddingModel": "intfloat/e5-base-v2" "parameters": { ... }, "endpoints": [ ... ] } ]` ``` ## Common issues ### 403๏ผšYou don't have access to this conversation Most likely you are running chat-ui over HTTP. The recommended option is to setup something like NGINX to handle HTTPS and proxy the requests to chat-ui. If you really need to run over HTTP you can add `ALLOW_INSECURE_COOKIES=true` to your `.env.local`. Make sure to set your `PUBLIC_ORIGIN` in your `.env.local` to the correct URL as well. 
## Deploying to a HF Space

Create a `DOTENV_LOCAL` secret in your HF Space with the content of your `.env.local`, and it will be picked up automatically when the Space runs.

## Building

To create a production version of your app:

```bash
npm run build
```

You can preview the production build with `npm run preview`.

> To deploy your app, you may need to install an [adapter](https://kit.svelte.dev/docs/adapters) for your target environment.

## Config changes for HuggingChat

The config file for HuggingChat is stored in the `chart/env/prod.yaml` file. It is the source of truth for the environment variables used for our CI/CD pipeline. For HuggingChat, as we need to customize the app color, as well as the base path, we build a custom Docker image. You can find the workflow here.

> [!TIP]
> If you want to make changes to the model config used in production for HuggingChat, you should do so against `chart/env/prod.yaml`.

### Running a copy of HuggingChat locally

If you want to run an exact copy of HuggingChat locally, you will need to do the following first:

1. Create an [OAuth App on the hub](https://huggingface.co/settings/applications/new) with `openid profile email` permissions. Make sure to set the callback URL to something like `http://localhost:5173/chat/login/callback`, which matches the right path for your local instance.
2. Create a [HF Token](https://huggingface.co/settings/tokens) with your Hugging Face account. You will need a Pro account to be able to access some of the larger models available through HuggingChat.
3. Create a free account with [serper.dev](https://serper.dev/) (you will get 2500 free search queries).
4. Run an instance of MongoDB, however you want (local or remote).

You can then create a new `.env.SECRET_CONFIG` file with the following content:

```env
MONGODB_URL=<link to your mongo DB from step 4>
HF_TOKEN=<your HF token from step 2>
OPENID_CONFIG=`{
  PROVIDER_URL: "https://huggingface.co",
  CLIENT_ID: "<your client ID from step 1>",
  CLIENT_SECRET: "<your client secret from step 1>",
}`
SERPER_API_KEY=<your serper API key from step 3>
MESSAGES_BEFORE_LOGIN=<can be any numerical value, or set to 0 to require login>
```

You can then run `npm run updateLocalEnv` in the root of chat-ui. This will create a `.env.local` file which combines the `chart/env/prod.yaml` and the `.env.SECRET_CONFIG` file.

You can then run `npm run dev` to start your local instance of HuggingChat.

### Populate database

> [!WARNING]
> The `MONGODB_URL` used for this script will be fetched from `.env.local`. Make sure it's correct! The command runs directly on the database.

You can populate the database using faker data using the `populate` script:

```bash
npm run populate <flags here>
```

At least one flag must be specified; the following flags are available:

- `reset` - resets the database
- `all` - populates all tables
- `users` - populates the users table
- `settings` - populates the settings table for existing users
- `assistants` - populates the assistants table for existing users
- `conversations` - populates the conversations table for existing users

For example, you could use it like so:

```bash
npm run populate reset
```

to clear out the database. Then log in to the app to create your user and run the following command:

```bash
npm run populate users settings assistants conversations
```

to populate the database with fake data, including fake conversations and assistants for your user.
chat-ui/README.md/0
{ "file_path": "chat-ui/README.md", "repo_id": "chat-ui", "token_count": 13474 }
52
# Metrics

The server can expose Prometheus metrics on port `5565`, but this is off by default. You may enable the metrics server with `METRICS_ENABLED=true` and change the port with `METRICS_PORT=1234`.

<Tip>

In development with `npm run dev`, the metrics server does not shut down gracefully, because SvelteKit does not provide hooks for restart. It's recommended to disable the metrics server in this case.

</Tip>
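For example, with the server enabled you can scrape the metrics locally. This is a sketch that assumes the default port and the conventional Prometheus `/metrics` path; verify the exact route against your chat-ui version:

```bash
# with METRICS_ENABLED=true in .env.local
curl http://localhost:5565/metrics
```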
chat-ui/docs/source/configuration/metrics.md/0
{ "file_path": "chat-ui/docs/source/configuration/metrics.md", "repo_id": "chat-ui", "token_count": 111 }
53
# Theming

You can use a few environment variables to customize the look and feel of Chat UI. These are by default:

```ini
PUBLIC_APP_NAME=ChatUI
PUBLIC_APP_ASSETS=chatui
PUBLIC_APP_COLOR=blue
PUBLIC_APP_DESCRIPTION="Making the community's best AI chat models available to everyone."
PUBLIC_APP_DATA_SHARING=
PUBLIC_APP_DISCLAIMER=
```

- `PUBLIC_APP_NAME` The name used as a title throughout the app.
- `PUBLIC_APP_ASSETS` Is used to find logos & favicons in `static/$PUBLIC_APP_ASSETS`, current options are `chatui` and `huggingchat`.
- `PUBLIC_APP_COLOR` Can be any of the [tailwind colors](https://tailwindcss.com/docs/customizing-colors#default-color-palette).
- `PUBLIC_APP_DATA_SHARING` Can be set to 1 to add a toggle in the user settings that lets your users opt in to data sharing with the model's creator.
- `PUBLIC_APP_DISCLAIMER` If set to 1, we show a disclaimer about generated outputs on login.
chat-ui/docs/source/configuration/theming.md/0
{ "file_path": "chat-ui/docs/source/configuration/theming.md", "repo_id": "chat-ui", "token_count": 286 }
54
declare module "*.ttf" { const value: ArrayBuffer; export default value; }
chat-ui/src/ambient.d.ts/0
{ "file_path": "chat-ui/src/ambient.d.ts", "repo_id": "chat-ui", "token_count": 26 }
55
<script lang="ts"> import { base } from "$app/paths"; import { page } from "$app/stores"; import { env as envPublic } from "$env/dynamic/public"; import LogoHuggingFaceBorderless from "$lib/components/icons/LogoHuggingFaceBorderless.svelte"; import Modal from "$lib/components/Modal.svelte"; import { useSettingsStore } from "$lib/stores/settings"; import { cookiesAreEnabled } from "$lib/utils/cookiesAreEnabled"; import Logo from "./icons/Logo.svelte"; const settings = useSettingsStore(); </script> <Modal on:close> <div class="from-primary-500/40 via-primary-500/10 to-primary-500/0 flex w-full flex-col items-center gap-6 bg-gradient-to-b px-5 pb-8 pt-9 text-center" > <h2 class="flex items-center text-2xl font-semibold text-gray-800"> <Logo classNames="mr-1" /> {envPublic.PUBLIC_APP_NAME} </h2> <p class="text-balance text-lg font-semibold leading-snug text-gray-800"> {envPublic.PUBLIC_APP_DESCRIPTION} </p> <p class="text-balance rounded-xl border bg-white/80 p-2 text-base text-gray-800"> You have reached the guest message limit, <strong class="font-semibold" >Sign In with a free Hugging Face account</strong > to continue using HuggingChat. </p> <form action="{base}/{$page.data.loginRequired ? 'login' : 'settings'}" target="_parent" method="POST" class="flex w-full flex-col items-center gap-2" > {#if $page.data.loginRequired} <button type="submit" class="flex w-full flex-wrap items-center justify-center whitespace-nowrap rounded-full bg-black px-5 py-2 text-center text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" > Sign in {#if envPublic.PUBLIC_APP_NAME === "HuggingChat"} <span class="flex items-center"> &nbsp;with <LogoHuggingFaceBorderless classNames="text-xl mr-1 ml-1.5" /> Hugging Face </span> {/if} </button> {:else} <button class="flex w-full items-center justify-center whitespace-nowrap rounded-full border-2 border-black bg-black px-5 py-2 text-lg font-semibold text-gray-100 transition-colors hover:bg-gray-900" on:click={(e) => { if (!cookiesAreEnabled()) { e.preventDefault(); window.open(window.location.href, "_blank"); } $settings.ethicsModalAccepted = true; }} > Start chatting </button> {/if} </form> </div> </Modal>
chat-ui/src/lib/components/LoginModal.svelte/0
{ "file_path": "chat-ui/src/lib/components/LoginModal.svelte", "repo_id": "chat-ui", "token_count": 974 }
56
<script lang="ts"> import type { Model } from "$lib/types/Model"; import { getTokenizer } from "$lib/utils/getTokenizer"; import type { PreTrainedTokenizer } from "@huggingface/transformers"; export let classNames = ""; export let prompt = ""; export let modelTokenizer: Exclude<Model["tokenizer"], undefined>; export let truncate: number | undefined = undefined; let tokenizer: PreTrainedTokenizer | undefined = undefined; async function tokenizeText(_prompt: string) { if (!tokenizer) { return; } const { input_ids } = await tokenizer(_prompt); return input_ids.size; } $: (async () => { tokenizer = await getTokenizer(modelTokenizer); })(); </script> {#if tokenizer} {#await tokenizeText(prompt) then nTokens} {@const exceedLimit = nTokens > (truncate || Infinity)} <div class={classNames}> <p class="peer text-sm {exceedLimit ? 'text-red-500 opacity-100' : 'opacity-60 hover:opacity-90'}" > {nTokens}{truncate ? `/${truncate}` : ""} </p> <div class="invisible absolute -top-6 right-0 whitespace-nowrap rounded bg-black px-1 text-sm text-white peer-hover:visible" > Tokens usage </div> </div> {/await} {/if}
chat-ui/src/lib/components/TokensCounter.svelte/0
{ "file_path": "chat-ui/src/lib/components/TokensCounter.svelte", "repo_id": "chat-ui", "token_count": 459 }
57
<script lang="ts"> export let classNames = ""; </script> <svg class={classNames} xmlns="http://www.w3.org/2000/svg" aria-hidden="true" fill="currentColor" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32" > <path d="M28,10V28H10V10H28m0-2H10a2,2,0,0,0-2,2V28a2,2,0,0,0,2,2H28a2,2,0,0,0,2-2V10a2,2,0,0,0-2-2Z" transform="translate(0)" /> <path d="M4,18H2V4A2,2,0,0,1,4,2H18V4H4Z" transform="translate(0)" /><rect fill="none" width="32" height="32" /> </svg>
chat-ui/src/lib/components/icons/IconCopy.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconCopy.svelte", "repo_id": "chat-ui", "token_count": 299 }
58
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; const updateAssistantsModels: Migration = { _id: new ObjectId("5f9f3f3f3f3f3f3f3f3f3f3f"), name: "Update deprecated models in assistants with the default model", up: async () => { const models = (await import("$lib/server/models")).models; const { assistants } = collections; const modelIds = models.map((el) => el.id); // string[] const defaultModelId = models[0].id; // Find all assistants whose modelId is not in modelIds, and update it to use defaultModelId await assistants.updateMany( { modelId: { $nin: modelIds } }, { $set: { modelId: defaultModelId } } ); return true; }, runEveryTime: true, runForHuggingChat: "only", }; export default updateAssistantsModels;
chat-ui/src/lib/migrations/routines/02-update-assistants-models.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/02-update-assistants-models.ts", "repo_id": "chat-ui", "token_count": 286 }
59
import { z } from "zod"; import type { Endpoint } from "../endpoints"; import { env } from "$env/dynamic/private"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import { createImageProcessorOptionsValidator } from "../images"; import { endpointMessagesToAnthropicMessages } from "./utils"; export const endpointAnthropicParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), type: z.literal("anthropic"), baseURL: z.string().url().default("https://api.anthropic.com"), apiKey: z.string().default(env.ANTHROPIC_API_KEY ?? "sk-"), defaultHeaders: z.record(z.string()).optional(), defaultQuery: z.record(z.string()).optional(), multimodal: z .object({ image: createImageProcessorOptionsValidator({ supportedMimeTypes: ["image/png", "image/jpeg", "image/webp"], preferredMimeType: "image/webp", // The 4 / 3 compensates for the 33% increase in size when converting to base64 maxSizeInMB: (5 / 4) * 3, maxWidth: 4096, maxHeight: 4096, }), }) .default({}), }); export async function endpointAnthropic( input: z.input<typeof endpointAnthropicParametersSchema> ): Promise<Endpoint> { const { baseURL, apiKey, model, defaultHeaders, defaultQuery, multimodal } = endpointAnthropicParametersSchema.parse(input); let Anthropic; try { Anthropic = (await import("@anthropic-ai/sdk")).default; } catch (e) { throw new Error("Failed to import @anthropic-ai/sdk", { cause: e }); } const anthropic = new Anthropic({ apiKey, baseURL, defaultHeaders, defaultQuery, }); return async ({ messages, preprompt, generateSettings }) => { let system = preprompt; if (messages?.[0]?.from === "system") { system = messages[0].content; } let tokenId = 0; const parameters = { ...model.parameters, ...generateSettings }; return (async function* () { const stream = anthropic.messages.stream({ model: model.id ?? model.name, messages: await endpointMessagesToAnthropicMessages(messages, multimodal), max_tokens: parameters?.max_new_tokens, temperature: parameters?.temperature, top_p: parameters?.top_p, top_k: parameters?.top_k, stop_sequences: parameters?.stop, system, }); while (true) { const result = await Promise.race([stream.emitted("text"), stream.emitted("end")]); // Stream end if (result === undefined) { yield { token: { id: tokenId++, text: "", logprob: 0, special: true, }, generated_text: await stream.finalText(), details: null, } satisfies TextGenerationStreamOutput; return; } // Text delta yield { token: { id: tokenId++, text: result as unknown as string, special: false, logprob: 0, }, generated_text: null, details: null, } satisfies TextGenerationStreamOutput; } })(); }; }
chat-ui/src/lib/server/endpoints/anthropic/endpointAnthropic.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/anthropic/endpointAnthropic.ts", "repo_id": "chat-ui", "token_count": 1138 }
60
import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type OpenAI from "openai"; import type { Stream } from "openai/streaming"; /** * Transform a stream of OpenAI.Completions.Completion into a stream of TextGenerationStreamOutput */ export async function* openAICompletionToTextGenerationStream( completionStream: Stream<OpenAI.Completions.Completion> ) { let generatedText = ""; let tokenId = 0; for await (const completion of completionStream) { const { choices } = completion; const text = choices[0]?.text ?? ""; const last = choices[0]?.finish_reason === "stop" || choices[0]?.finish_reason === "length"; if (text) { generatedText = generatedText + text; } const output: TextGenerationStreamOutput = { token: { id: tokenId++, text, logprob: 0, special: last, }, generated_text: last ? generatedText : null, details: null, }; yield output; } }
chat-ui/src/lib/server/endpoints/openai/openAICompletionToTextGenerationStream.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/openai/openAICompletionToTextGenerationStream.ts", "repo_id": "chat-ui", "token_count": 325 }
61
import { env } from "$env/dynamic/private";
import { generateFromDefaultEndpoint } from "$lib/server/generateFromDefaultEndpoint";
import type { EndpointMessage } from "../endpoints/endpoints";
import { logger } from "$lib/server/logger";
import { MessageUpdateType, type MessageUpdate } from "$lib/types/MessageUpdate";
import type { Conversation } from "$lib/types/Conversation";

export async function* generateTitleForConversation(
	conv: Conversation
): AsyncGenerator<MessageUpdate, undefined, undefined> {
	try {
		const userMessage = conv.messages.find((m) => m.from === "user");
		// HACK: detect if the conversation is new
		if (conv.title !== "New Chat" || !userMessage) return;

		const prompt = userMessage.content;
		const title = (await generateTitle(prompt)) ?? "New Chat";

		yield {
			type: MessageUpdateType.Title,
			title,
		};
	} catch (cause) {
		logger.error(new Error("Failed while generating title for conversation", { cause }));
	}
}

export async function generateTitle(prompt: string) {
	if (env.LLM_SUMMARIZATION !== "true") {
		return prompt.split(/\s+/g).slice(0, 5).join(" ");
	}

	const messages: Array<EndpointMessage> = [
		{
			from: "system",
			content:
				"You are a summarization AI. You'll never answer a user's question directly, but instead summarize the user's request into a single short sentence of four words or less. Always start your answer with an emoji relevant to the summary",
		},
		{ from: "user", content: "Who is the president of Gabon?" },
		{ from: "assistant", content: "🇬🇦 President of Gabon" },
		{ from: "user", content: "Who is Julien Chaumond?" },
		{ from: "assistant", content: "🧑 Julien Chaumond" },
		{ from: "user", content: "what is 1 + 1?" },
		{ from: "assistant", content: "🔢 Simple math operation" },
		{ from: "user", content: "What are the latest news?" },
		{ from: "assistant", content: "📰 Latest news" },
		{ from: "user", content: "How to make a great cheesecake?" },
		{ from: "assistant", content: "🍰 Cheesecake recipe" },
		{ from: "user", content: "what is your favorite movie? do a short answer." },
		{ from: "assistant", content: "🎥 Favorite movie" },
		{ from: "user", content: "Explain the concept of artificial intelligence in one sentence" },
		{ from: "assistant", content: "🤖 AI definition" },
		{ from: "user", content: "Draw a cute cat" },
		{ from: "assistant", content: "🐱 Cute cat drawing" },
		{ from: "user", content: prompt },
	];

	return await generateFromDefaultEndpoint({
		messages,
		preprompt:
			"You are a summarization AI. Summarize the user's request into a single short sentence of four words or less. Do not try to answer it, only summarize the user's query. Always start your answer with an emoji relevant to the summary",
		generateSettings: {
			max_new_tokens: 15,
		},
	})
		.then((summary) => {
			// add an emoji if none is found in the first three characters
			if (!/\p{Emoji}/u.test(summary.slice(0, 3))) {
				return "💬 " + summary;
			}
			return summary;
		})
		.catch((e) => {
			logger.error(e);
			return null;
		});
}
chat-ui/src/lib/server/textGeneration/title.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/title.ts", "repo_id": "chat-ui", "token_count": 1028 }
62
/* eslint-disable-next-line no-shadow */ export enum MarkdownElementType { Header = "HEADER", Paragraph = "PARAGRAPH", BlockQuote = "BLOCKQUOTE", CodeBlock = "CODE_BLOCK", UnorderedList = "UNORDERED_LIST", OrderedList = "ORDERED_LIST", UnorderedListItem = "UNORDERED_LIST_ITEM", OrderedListItem = "ORDERED_LIST_ITEM", } interface BaseMarkdownElement<T = MarkdownElementType> { type: T; content: string; parent: HeaderElement | null; } export interface HeaderElement extends BaseMarkdownElement<MarkdownElementType.Header> { level: number; children: MarkdownElement[]; } type ListItem = MarkdownElementType.UnorderedListItem | MarkdownElementType.OrderedListItem; interface ListItemElement extends BaseMarkdownElement<ListItem> { depth: number; } interface BlockQuoteElement extends BaseMarkdownElement<MarkdownElementType.BlockQuote> { depth: number; } interface ParagraphElement extends BaseMarkdownElement<MarkdownElementType.Paragraph> {} interface CodeBlockElement extends BaseMarkdownElement<MarkdownElementType.CodeBlock> {} export type MarkdownElement = | HeaderElement | ParagraphElement | BlockQuoteElement | CodeBlockElement | ListItemElement; export const tagNameMap: Record<string, MarkdownElementType> = { h1: MarkdownElementType.Header, h2: MarkdownElementType.Header, h3: MarkdownElementType.Header, h4: MarkdownElementType.Header, h5: MarkdownElementType.Header, h6: MarkdownElementType.Header, div: MarkdownElementType.Paragraph, p: MarkdownElementType.Paragraph, blockquote: MarkdownElementType.BlockQuote, pre: MarkdownElementType.CodeBlock, ul: MarkdownElementType.UnorderedList, ol: MarkdownElementType.OrderedList, li: MarkdownElementType.UnorderedListItem, };
chat-ui/src/lib/server/websearch/markdown/types.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/types.ts", "repo_id": "chat-ui", "token_count": 541 }
63
import { JSDOM, VirtualConsole } from "jsdom";
import { isURL } from "$lib/utils/isUrl";
import type { WebSearchSource } from "$lib/types/WebSearch";

export default async function searchWebLocal(query: string): Promise<WebSearchSource[]> {
	const abortController = new AbortController();
	setTimeout(() => abortController.abort(), 10000);

	const htmlString = await fetch(
		"https://www.google.com/search?hl=en&q=" + encodeURIComponent(query),
		{ signal: abortController.signal }
	)
		.then((response) => response.text())
		.catch();

	const virtualConsole = new VirtualConsole();
	virtualConsole.on("error", () => {}); // No-op to skip console errors.
	const document = new JSDOM(htmlString ?? "", { virtualConsole }).window.document;

	// get all links
	const links = document.querySelectorAll("a");
	if (!links.length) throw new Error(`webpage doesn't have any "a" element`);

	// take URLs that start with /url?q=
	// and do not contain google.com links
	// and strip them up to '&sa='
	const linksHref = Array.from(links)
		.map((el) => el.href)
		.filter((link) => link.startsWith("/url?q=") && !link.includes("google.com/"))
		.map((link) => link.slice("/url?q=".length, link.indexOf("&sa=")))
		.filter(isURL);

	// remove duplicate links and map links to the correct object shape
	return [...new Set(linksHref)].map((link) => ({ link }));
}
chat-ui/src/lib/server/websearch/search/endpoints/webLocal.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/endpoints/webLocal.ts", "repo_id": "chat-ui", "token_count": 439 }
64
import type { Timestamps } from "./Timestamps"; import type { Assistant } from "./Assistant"; export interface AssistantStats extends Timestamps { assistantId: Assistant["_id"]; date: { at: Date; span: "hour"; }; count: number; }
chat-ui/src/lib/types/AssistantStats.ts/0
{ "file_path": "chat-ui/src/lib/types/AssistantStats.ts", "repo_id": "chat-ui", "token_count": 80 }
65
import type { Timestamps } from "./Timestamps"; export interface TokenCache extends Timestamps { tokenHash: string; // sha256 of the bearer token userId: string; // the matching hf user id }
chat-ui/src/lib/types/TokenCache.ts/0
{ "file_path": "chat-ui/src/lib/types/TokenCache.ts", "repo_id": "chat-ui", "token_count": 57 }
66
// Approximate width from which we disable autofocus const TABLET_VIEWPORT_WIDTH = 768; export function isDesktop(window: Window) { const { innerWidth } = window; return innerWidth > TABLET_VIEWPORT_WIDTH; }
chat-ui/src/lib/utils/isDesktop.ts/0
{ "file_path": "chat-ui/src/lib/utils/isDesktop.ts", "repo_id": "chat-ui", "token_count": 67 }
67
import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { describe, expect, it } from "vitest"; import { insertLegacyConversation, insertSideBranchesConversation } from "./treeHelpers.spec"; import { addChildren } from "./addChildren"; import type { Message } from "$lib/types/Message"; const newMessage: Omit<Message, "id"> = { content: "new message", from: "user", }; Object.freeze(newMessage); describe("addChildren", async () => { it("should let you append on legacy conversations", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const convLength = conv.messages.length; addChildren(conv, newMessage, conv.messages[conv.messages.length - 1].id); expect(conv.messages.length).toEqual(convLength + 1); }); it("should not let you create branches on legacy conversations", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); expect(() => addChildren(conv, newMessage, conv.messages[0].id)).toThrow(); }); it("should not let you create a message that already exists", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const messageThatAlreadyExists: Message = { id: conv.messages[0].id, content: "new message", from: "user", }; expect(() => addChildren(conv, messageThatAlreadyExists, conv.messages[0].id)).toThrow(); }); it("should let you create branches on conversations with subtrees", async () => { const convId = await insertSideBranchesConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const nChildren = conv.messages[0].children?.length; if (!nChildren) throw new Error("No children found"); addChildren(conv, newMessage, conv.messages[0].id); expect(conv.messages[0].children?.length).toEqual(nChildren + 1); }); it("should let you create a new leaf", async () => { const convId = await insertSideBranchesConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); const parentId = conv.messages[conv.messages.length - 1].id; const nChildren = conv.messages[conv.messages.length - 1].children?.length; if (nChildren === undefined) throw new Error("No children found"); expect(nChildren).toEqual(0); addChildren(conv, newMessage, parentId); expect(conv.messages[conv.messages.length - 2].children?.length).toEqual(nChildren + 1); }); it("should let you append to an empty conversation without specifying a parentId", async () => { const conv = { _id: new ObjectId(), rootMessageId: undefined, messages: [] as Message[], }; addChildren(conv, newMessage); expect(conv.messages.length).toEqual(1); expect(conv.rootMessageId).toEqual(conv.messages[0].id); }); it("should throw if you don't specify a parentId in a conversation with messages", async () => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); expect(() => addChildren(conv, newMessage)).toThrow(); }); it("should return the id of the new message", async 
() => { const convId = await insertLegacyConversation(); const conv = await collections.conversations.findOne({ _id: new ObjectId(convId) }); if (!conv) throw new Error("Conversation not found"); expect(addChildren(conv, newMessage, conv.messages[conv.messages.length - 1].id)).toEqual( conv.messages[conv.messages.length - 1].id ); }); });
chat-ui/src/lib/utils/tree/addChildren.spec.ts/0
{ "file_path": "chat-ui/src/lib/utils/tree/addChildren.spec.ts", "repo_id": "chat-ui", "token_count": 1301 }
68
import { json } from "@sveltejs/kit"; import { logger } from "$lib/server/logger"; import { computeAllStats } from "$lib/jobs/refresh-conversation-stats"; // Triger like this: // curl -X POST "http://localhost:5173/chat/admin/stats/compute" -H "Authorization: Bearer <ADMIN_API_SECRET>" export async function POST() { computeAllStats().catch((e) => logger.error(e)); return json( { message: "Stats job started", }, { status: 202 } ); }
chat-ui/src/routes/admin/stats/compute/+server.ts/0
{ "file_path": "chat-ui/src/routes/admin/stats/compute/+server.ts", "repo_id": "chat-ui", "token_count": 161 }
69
<script lang="ts"> import type { PageData } from "./$types"; import { env as envPublic } from "$env/dynamic/public"; import { isHuggingChat } from "$lib/utils/isHuggingChat"; import { goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/stores"; import CarbonAdd from "~icons/carbon/add"; import CarbonHelpFilled from "~icons/carbon/help-filled"; import CarbonClose from "~icons/carbon/close"; import CarbonArrowUpRight from "~icons/carbon/arrow-up-right"; import CarbonEarthAmerica from "~icons/carbon/earth-americas-filled"; import CarbonUserMultiple from "~icons/carbon/user-multiple"; import CarbonSearch from "~icons/carbon/search"; import CarbonTools from "~icons/carbon/tools"; import Pagination from "$lib/components/Pagination.svelte"; import { formatUserCount } from "$lib/utils/formatUserCount"; import { getHref } from "$lib/utils/getHref"; import { debounce } from "$lib/utils/debounce"; import { useSettingsStore } from "$lib/stores/settings"; import IconInternet from "$lib/components/icons/IconInternet.svelte"; import { isDesktop } from "$lib/utils/isDesktop"; import { SortKey } from "$lib/types/Assistant"; export let data: PageData; $: assistantsCreator = $page.url.searchParams.get("user"); $: createdByMe = data.user?.username && data.user.username === assistantsCreator; const SEARCH_DEBOUNCE_DELAY = 400; let filterInputEl: HTMLInputElement; let filterValue = data.query; let isFilterInPorgress = false; let sortValue = data.sort as SortKey; const onModelChange = (e: Event) => { const newUrl = getHref($page.url, { newKeys: { modelId: (e.target as HTMLSelectElement).value }, existingKeys: { behaviour: "delete_except", keys: ["user"] }, }); resetFilter(); goto(newUrl); }; const resetFilter = () => { filterValue = ""; isFilterInPorgress = false; }; const filterOnName = debounce(async (value: string) => { filterValue = value; if (isFilterInPorgress) { return; } isFilterInPorgress = true; const newUrl = getHref($page.url, { newKeys: { q: value }, existingKeys: { behaviour: "delete", keys: ["p"] }, }); await goto(newUrl); if (isDesktop(window)) { setTimeout(() => filterInputEl.focus(), 0); } isFilterInPorgress = false; // there was a new filter query before server returned response if (filterValue !== value) { filterOnName(filterValue); } }, SEARCH_DEBOUNCE_DELAY); const sortAssistants = () => { const newUrl = getHref($page.url, { newKeys: { sort: sortValue }, existingKeys: { behaviour: "delete", keys: ["p"] }, }); goto(newUrl); }; const settings = useSettingsStore(); </script> <svelte:head> {#if isHuggingChat} <title>HuggingChat - Assistants</title> <meta property="og:title" content="HuggingChat - Assistants" /> <meta property="og:type" content="link" /> <meta property="og:description" content="Browse HuggingChat assistants made by the community." 
/> <meta property="og:image" content="{envPublic.PUBLIC_ORIGIN || $page.url.origin}{base}/{envPublic.PUBLIC_APP_ASSETS}/assistants-thumbnail.png" /> <meta property="og:url" content={$page.url.href} /> {/if} </svelte:head> <div class="scrollbar-custom mr-1 h-full overflow-y-auto py-12 max-sm:pt-8 md:py-24"> <div class="pt-42 mx-auto flex flex-col px-5 xl:w-[60rem] 2xl:w-[64rem]"> <div class="flex items-center"> <h1 class="text-2xl font-bold">Assistants</h1> {#if isHuggingChat} <div class="5 ml-1.5 rounded-lg text-xxs uppercase text-gray-500 dark:text-gray-500"> beta </div> <a href="https://huggingface.co/spaces/huggingchat/chat-ui/discussions/357" class="ml-auto dark:text-gray-400 dark:hover:text-gray-300" target="_blank" > <CarbonHelpFilled /> </a> {/if} </div> <h3 class="text-gray-500">Popular assistants made by the community</h3> <div class="mt-6 flex justify-between gap-2 max-sm:flex-col sm:items-center"> <select class="mt-1 h-[34px] rounded-lg border border-gray-300 bg-gray-50 px-2 text-sm text-gray-900 focus:border-blue-700 focus:ring-blue-700 dark:border-gray-600 dark:bg-gray-700 dark:text-white dark:placeholder-gray-400" bind:value={data.selectedModel} on:change={onModelChange} > <option value="">All models</option> {#each data.models.filter((model) => !model.unlisted) as model} <option value={model.name}>{model.name}</option> {/each} </select> <a href={`${base}/settings/assistants/new`} class="flex items-center gap-1 whitespace-nowrap rounded-lg border bg-white py-1 pl-1.5 pr-2.5 shadow-sm hover:bg-gray-50 hover:shadow-none dark:border-gray-600 dark:bg-gray-700 dark:hover:bg-gray-700" > <CarbonAdd />Create new assistant </a> </div> <div class="mt-7 flex flex-wrap items-center gap-x-2 gap-y-3 text-sm"> {#if assistantsCreator && !createdByMe} <div class="flex items-center gap-1.5 rounded-full border border-gray-300 bg-gray-50 px-3 py-1 dark:border-gray-600 dark:bg-gray-700 dark:text-white" > {assistantsCreator}'s Assistants <a href={getHref($page.url, { existingKeys: { behaviour: "delete", keys: ["user", "modelId", "p", "q"] }, })} on:click={resetFilter} class="group" ><CarbonClose class="text-xs group-hover:text-gray-800 dark:group-hover:text-gray-300" /></a > </div> {#if isHuggingChat} <a href="https://hf.co/{assistantsCreator}" target="_blank" class="ml-auto flex items-center text-xs text-gray-500 underline hover:text-gray-800 dark:text-gray-400 dark:hover:text-gray-300" ><CarbonArrowUpRight class="mr-1 flex-none text-[0.58rem]" target="_blank" />View {assistantsCreator} on HF</a > {/if} {:else} <a href={getHref($page.url, { existingKeys: { behaviour: "delete", keys: ["user", "modelId", "p", "q"] }, })} on:click={resetFilter} class="flex items-center gap-1.5 rounded-full border px-3 py-1 {!assistantsCreator ? 'border-gray-300 bg-gray-50 dark:border-gray-600 dark:bg-gray-700 dark:text-white' : 'border-transparent text-gray-400 hover:text-gray-800 dark:hover:text-gray-300'}" > <CarbonEarthAmerica class="text-xs" /> Community </a> {#if data.user?.username} <a href={getHref($page.url, { newKeys: { user: data.user.username }, existingKeys: { behaviour: "delete", keys: ["modelId", "p", "q"] }, })} on:click={resetFilter} class="flex items-center gap-1.5 truncate rounded-full border px-3 py-1 {assistantsCreator && createdByMe ? 
'border-gray-300 bg-gray-50 dark:border-gray-600 dark:bg-gray-700 dark:text-white' : 'border-transparent text-gray-400 hover:text-gray-800 dark:hover:text-gray-300'}" >{data.user.username} </a> {/if} {/if} <div class="relative ml-auto flex h-[30px] w-40 items-center rounded-full border px-2 has-[:focus]:border-gray-400 dark:border-gray-600 sm:w-64" > <CarbonSearch class="pointer-events-none absolute left-2 text-xs text-gray-400" /> <input class="h-[30px] w-full bg-transparent pl-5 focus:outline-none" placeholder="Filter by name" value={filterValue} on:input={(e) => filterOnName(e.currentTarget.value)} bind:this={filterInputEl} maxlength="150" type="search" /> </div> <select bind:value={sortValue} on:change={sortAssistants} class="rounded-lg border border-gray-300 bg-gray-50 px-2 py-1 text-sm text-gray-900 focus:border-blue-700 focus:ring-blue-700 dark:border-gray-600 dark:bg-gray-700 dark:text-white dark:placeholder-gray-400" > <option value={SortKey.TRENDING}>{SortKey.TRENDING}</option> <option value={SortKey.POPULAR}>{SortKey.POPULAR}</option> </select> </div> <div class="mt-8 grid grid-cols-2 gap-3 sm:gap-5 md:grid-cols-3 lg:grid-cols-4"> {#each data.assistants as assistant (assistant._id)} {@const hasRag = assistant?.rag?.allowAllDomains || !!assistant?.rag?.allowedDomains?.length || !!assistant?.rag?.allowedLinks?.length || !!assistant?.dynamicPrompt} <button class="relative flex flex-col items-center justify-center overflow-hidden text-balance rounded-xl border bg-gray-50/50 px-4 py-6 text-center shadow hover:bg-gray-50 hover:shadow-inner dark:border-gray-800/70 dark:bg-gray-950/20 dark:hover:bg-gray-950/40 max-sm:px-4 sm:h-64 sm:pb-4 xl:pt-8" on:click={() => { if (data.settings.assistants.includes(assistant._id.toString())) { settings.instantSet({ activeModel: assistant._id.toString() }); goto(`${base}` || "/"); } else { goto(`${base}/assistant/${assistant._id}`); } }} > {#if assistant.userCount && assistant.userCount > 1} <div class="absolute right-3 top-3 flex items-center gap-1 text-xs text-gray-400" title="Number of users" > <CarbonUserMultiple class="text-xxs" />{formatUserCount(assistant.userCount)} </div> {/if} <div class="absolute left-3 top-3 flex items-center gap-1 text-xs text-gray-400"> {#if assistant.tools?.length} <div class="grid size-5 place-items-center rounded-full bg-purple-500/10" title="This assistant uses the websearch." > <CarbonTools class="text-xs text-purple-600" /> </div> {/if} {#if hasRag} <div class="grid size-5 place-items-center rounded-full bg-blue-500/10" title="This assistant uses the websearch." 
> <IconInternet classNames="text-sm text-blue-600" /> </div> {/if} </div> {#if assistant.avatar} <img src="{base}/settings/assistants/{assistant._id}/avatar.jpg" alt="Avatar" class="mb-2 aspect-square size-12 flex-none rounded-full object-cover sm:mb-6 sm:size-20" /> {:else} <div class="mb-2 flex aspect-square size-12 flex-none items-center justify-center rounded-full bg-gray-300 text-2xl font-bold uppercase text-gray-500 dark:bg-gray-800 sm:mb-6 sm:size-20" > {assistant.name[0]} </div> {/if} <h3 class="mb-2 line-clamp-2 max-w-full break-words text-center text-[.8rem] font-semibold leading-snug sm:text-sm" > {assistant.name} </h3> <p class="line-clamp-4 text-xs text-gray-700 dark:text-gray-400 sm:line-clamp-2"> {assistant.description} </p> {#if assistant.createdByName} <p class="mt-auto pt-2 text-xs text-gray-400 dark:text-gray-500"> Created by <a class="hover:underline" href="{base}/assistants?user={assistant.createdByName}" > {assistant.createdByName} </a> </p> {/if} </button> {:else} No assistants found {/each} </div> <Pagination classNames="w-full flex justify-center mt-14 mb-4" numItemsPerPage={data.numItemsPerPage} numTotalItems={data.numTotalItems} /> </div> </div>
chat-ui/src/routes/assistants/+page.svelte/0
{ "file_path": "chat-ui/src/routes/assistants/+page.svelte", "repo_id": "chat-ui", "token_count": 4777 }
70
import { dev } from "$app/environment"; import { base } from "$app/paths"; import { env } from "$env/dynamic/private"; import { collections } from "$lib/server/database"; import { redirect } from "@sveltejs/kit"; export const actions = { async default({ cookies, locals }) { await collections.sessions.deleteOne({ sessionId: locals.sessionId }); cookies.delete(env.COOKIE_NAME, { path: "/", // So that it works inside the space's iframe sameSite: dev || env.ALLOW_INSECURE_COOKIES === "true" ? "lax" : "none", secure: !dev && !(env.ALLOW_INSECURE_COOKIES === "true"), httpOnly: true, }); redirect(303, `${base}/`); }, };
chat-ui/src/routes/logout/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/logout/+page.server.ts", "repo_id": "chat-ui", "token_count": 237 }
71
<script lang="ts"> import { applyAction, enhance } from "$app/forms"; import { invalidateAll } from "$app/navigation"; import Modal from "$lib/components/Modal.svelte"; import { createEventDispatcher } from "svelte"; const dispatch = createEventDispatcher<{ close: void }>(); let reason = ""; </script> <Modal on:close> <form method="POST" action="?/report" use:enhance={() => { return async ({ result }) => { await applyAction(result); dispatch("close"); invalidateAll(); }; }} class="w-full min-w-64 p-4" > <span class="mb-1 text-sm font-semibold">Report content</span> <p class="text-sm text-gray-500"> Please provide a brief description of why you are reporting this content. </p> <textarea name="reportReason" class="mt-6 max-h-48 w-full resize-y rounded-lg border-2 border-gray-200 bg-gray-100 p-2 text-smd" placeholder="Reason(s) for the report" maxlength="128" bind:value={reason} /> <div class="flex w-full flex-row justify-between px-2 pt-4"> <button type="button" class="text-sm text-gray-700 hover:underline" on:click={() => dispatch("close")}>Cancel</button > <button type="submit" class="rounded-full bg-black px-4 py-2 text-sm font-semibold text-white md:px-8" disabled={!reason} class:bg-gray-200={!reason} class:!text-gray-400={!reason} > Submit report </button> </div> </form> </Modal>
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/ReportModal.svelte/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/ReportModal.svelte", "repo_id": "chat-ui", "token_count": 592 }
72
<script lang="ts"> import { afterNavigate, goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/stores"; import Modal from "$lib/components/Modal.svelte"; import ToolLogo from "$lib/components/ToolLogo.svelte"; import { env as envPublic } from "$env/dynamic/public"; import { useSettingsStore } from "$lib/stores/settings"; import ReportModal from "../../settings/(nav)/assistants/[assistantId]/ReportModal.svelte"; import { applyAction, enhance } from "$app/forms"; import CopyToClipBoardBtn from "$lib/components/CopyToClipBoardBtn.svelte"; import CarbonPen from "~icons/carbon/pen"; import CarbonTrash from "~icons/carbon/trash-can"; import CarbonCopy from "~icons/carbon/copy-file"; import CarbonFlag from "~icons/carbon/flag"; import CarbonLink from "~icons/carbon/link"; import CarbonStar from "~icons/carbon/star"; export let data; const settings = useSettingsStore(); let previousPage: string = base; afterNavigate(({ from }) => { if (!from?.url.pathname.includes("tools/")) { previousPage = from?.url.toString() || previousPage; } }); const prefix = envPublic.PUBLIC_SHARE_PREFIX || `${envPublic.PUBLIC_ORIGIN || $page.url.origin}${base}`; $: shareUrl = `${prefix}/tools/${data.tool?._id}`; $: isActive = $settings.tools?.includes(data.tool?._id.toString()); let displayReportModal = false; </script> {#if displayReportModal} <ReportModal on:close={() => (displayReportModal = false)} /> {/if} <Modal on:close={() => goto(previousPage)} width="min-w-xl"> <div class="w-full min-w-64 p-8"> <div class="flex h-full flex-col gap-2"> <div class="flex gap-6"> <ToolLogo color={data.tool.color} icon={data.tool.icon} size="lg" /> <div class="flex-1"> <div> <h1 class="mr-1 inline text-xl font-semibold"> {data.tool.displayName} </h1> <span class="ml-1 rounded-full border px-2 py-0.5 text-sm leading-none text-gray-500" >public</span > </div> {#if data.tool?.baseUrl} {#if data.tool.baseUrl.startsWith("https://")} <p class="mb-2 line-clamp-2 font-mono text-gray-500"> {data.tool.baseUrl} </p> {:else} <a href="https://huggingface.co/spaces/{data.tool.baseUrl}" target="_blank" class="mb-2 line-clamp-2 font-mono text-gray-500 hover:underline" > {data.tool.baseUrl} </a> {/if} {/if} {#if data.tool.type === "community"} <p class="text-sm text-gray-500"> Added by <a class="underline" href="{base}/tools?user={data.tool?.createdByName}"> {data.tool?.createdByName} </a> <span class="text-gray-300">โ€ข</span> {data.tool.useCount} runs </p> {/if} <div class="flex items-center gap-4 whitespace-nowrap text-sm text-gray-500 hover:*:text-gray-800" > <button class="{isActive ? 'bg-gray-100 text-gray-800' : 'bg-black !text-white'} my-2 flex w-fit items-center rounded-full px-3 py-1 text-base" name="Activate model" on:click|stopPropagation={() => { if (isActive) { settings.instantSet({ tools: ($settings?.tools ?? []).filter((t) => t !== data.tool._id), }); } else { settings.instantSet({ tools: [...($settings?.tools ?? []), data.tool._id], }); } }} > {isActive ? "Deactivate" : "Activate"} </button> {#if data.tool?.createdByMe} <a href="{base}/tools/{data.tool?._id}/edit" class="underline" ><CarbonPen class="mr-1.5 inline text-xs" />Edit </a> <form method="POST" action="?/delete" use:enhance={async () => { return async ({ result }) => { if (result.type === "success") { $settings.tools = ($settings?.tools ?? 
[]).filter((t) => t !== data.tool._id); goto(`${base}/tools`, { invalidateAll: true }); } else { await applyAction(result); } }; }} > <button type="submit" class="flex items-center underline"> <CarbonTrash class="mr-1.5 inline text-xs" />Delete</button > </form> {:else if !!data.tool?.baseUrl} <a href="{base}/tools/{data.tool?._id}/edit" class="underline"> <CarbonPen class="mr-1.5 inline text-xs" />View spec </a> <form method="POST" action="?/edit" use:enhance class="hidden"> <button type="submit" class="underline"> <CarbonCopy class="mr-1.5 inline text-xs" />Duplicate</button > </form> {#if !data.tool?.reported} <button type="button" on:click={() => { displayReportModal = true; }} class="underline" > <CarbonFlag class="mr-1.5 inline text-xs" />Report </button> {:else} <button type="button" disabled class="text-gray-700"> <CarbonFlag class="mr-1.5 inline text-xs" />Reported</button > {/if} {/if} {#if data?.user?.isAdmin} <form method="POST" action="?/delete" use:enhance> <button type="submit" class="flex items-center text-red-600 underline"> <CarbonTrash class="mr-1.5 inline text-xs" />Delete</button > </form> {#if data.tool?.featured} <form method="POST" action="?/unfeature" use:enhance> <button type="submit" class="flex items-center text-red-600 underline"> <CarbonTrash class="mr-1.5 inline text-xs" />Un-feature</button > </form> {:else} <form method="POST" action="?/feature" use:enhance> <button type="submit" class="flex items-center text-green-600 underline"> <CarbonStar class="mr-1.5 inline text-xs" />Feature</button > </form> {/if} {/if} </div> </div> </div> <p class="text-sm"> Tools are applications that the model can choose to call while you are chatting with it. </p> {#if data.tool.description} <div> <h2 class="text-lg font-semibold">Description</h2> <p class="pb-2">{data.tool.description}</p> </div> {/if} <div> <h2 class="text-lg font-semibold">Direct URL</h2> <p class="pb-2 text-sm text-gray-500">Share this link with people to use your tool.</p> <div class="flex flex-row gap-2 rounded-lg border-2 border-gray-200 bg-gray-100 py-2 pl-3 pr-1.5" > <input disabled class="flex-1 truncate bg-inherit" value={shareUrl} /> <CopyToClipBoardBtn value={shareUrl} classNames="!border-none !shadow-none !py-0 !px-1 !rounded-md" > <div class="flex items-center gap-1.5 text-gray-500 hover:underline"> <CarbonLink />Copy </div> </CopyToClipBoardBtn> </div> </div> </div> </div></Modal >
chat-ui/src/routes/tools/[toolId]/+page.svelte/0
{ "file_path": "chat-ui/src/routes/tools/[toolId]/+page.svelte", "repo_id": "chat-ui", "token_count": 3297 }
73
{ "$schema": "https://vega.github.io/schema/vega-lite/v4.json", "data": { "values": "<DVC_METRIC_DATA>" }, "title": "<DVC_METRIC_TITLE>", "mark": "rect", "encoding": { "x": { "field": "<DVC_METRIC_X>", "type": "nominal", "sort": "ascending", "title": "<DVC_METRIC_X_LABEL>" }, "y": { "field": "<DVC_METRIC_Y>", "type": "nominal", "sort": "ascending", "title": "<DVC_METRIC_Y_LABEL>" }, "color": { "aggregate": "count", "type": "quantitative" }, "facet": { "field": "rev", "type": "nominal" } } }
datasets/.dvc/plots/confusion.json/0
{ "file_path": "datasets/.dvc/plots/confusion.json", "repo_id": "datasets", "token_count": 450 }
74
# This is the list of HuggingFace Datasets authors for copyright purposes. # # This does not necessarily list everyone who has contributed code, since in # some cases, their employer may be the copyright holder. To see the full list # of contributors, see the revision history in source control. Google Inc. HuggingFace Inc.
datasets/AUTHORS/0
{ "file_path": "datasets/AUTHORS", "repo_id": "datasets", "token_count": 78 }
75
# Know your dataset There are two types of dataset objects, a regular [`Dataset`] and then an ✨ [`IterableDataset`] ✨. A [`Dataset`] provides fast random access to the rows, and memory-mapping so that loading even large datasets only uses a relatively small amount of device memory. But for really, really big datasets that won't even fit on disk or in memory, an [`IterableDataset`] allows you to access and use the dataset without waiting for it to download completely! This tutorial will show you how to load and access a [`Dataset`] and an [`IterableDataset`]. ## Dataset When you load a dataset split, you'll get a [`Dataset`] object. You can do many things with a [`Dataset`] object, which is why it's important to learn how to manipulate and interact with the data stored inside. This tutorial uses the [rotten_tomatoes](https://huggingface.co/datasets/rotten_tomatoes) dataset, but feel free to load any dataset you'd like and follow along! ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes", split="train") ``` ### Indexing A [`Dataset`] contains columns of data, and each column can be a different type of data. The *index*, or axis label, is used to access examples from the dataset. For example, indexing by the row returns a dictionary of an example from the dataset: ```py # Get the first row in the dataset >>> dataset[0] {'label': 1, 'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'} ``` Use the `-` operator to start from the end of the dataset: ```py # Get the last row in the dataset >>> dataset[-1] {'label': 0, 'text': 'things really get weird , though not particularly scary : the movie is all portent and no content .'} ``` Indexing by the column name returns a list of all the values in the column: ```py >>> dataset["text"] ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', 'effective but too-tepid biopic', ..., 'things really get weird , though not particularly scary : the movie is all portent and no content .'] ``` You can combine row and column name indexing to return a specific value at a position: ```py >>> dataset[0]["text"] 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .' ``` But it is important to remember that indexing order matters, especially when working with large audio and image datasets. Indexing by the column name returns all the values in the column first, then loads the value at that position. For large datasets, it may be slower to index by the column name first. 
```py >>> import time >>> start_time = time.time() >>> text = dataset[0]["text"] >>> end_time = time.time() >>> print(f"Elapsed time: {end_time - start_time:.4f} seconds") Elapsed time: 0.0031 seconds >>> start_time = time.time() >>> text = dataset["text"][0] >>> end_time = time.time() >>> print(f"Elapsed time: {end_time - start_time:.4f} seconds") Elapsed time: 0.0094 seconds ``` ### Slicing Slicing returns a slice - or subset - of the dataset, which is useful for viewing several rows at once. To slice a dataset, use the `:` operator to specify a range of positions. ```py # Get the first three rows >>> dataset[:3] {'label': [1, 1, 1], 'text': ['the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .', 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .', 'effective but too-tepid biopic']} # Get rows between three and six >>> dataset[3:6] {'label': [1, 1, 1], 'text': ['if you sometimes like to go to the movies to have fun , wasabi is a good place to start .', "emerges as something rare , an issue movie that's so honest and keenly observed that it doesn't feel like one .", 'the film provides some great insight into the neurotic mindset of all comics -- even those who have reached the absolute top of the game .']} ``` ## IterableDataset An [`IterableDataset`] is loaded when you set the `streaming` parameter to `True` in [`~datasets.load_dataset`]: ```py >>> from datasets import load_dataset >>> iterable_dataset = load_dataset("food101", split="train", streaming=True) >>> for example in iterable_dataset: ... print(example) ... break {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F5C520>, 'label': 6} ``` You can also create an [`IterableDataset`] from an *existing* [`Dataset`], and it is faster than streaming mode because the dataset is streamed from local files: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes", split="train") >>> iterable_dataset = dataset.to_iterable_dataset() ``` An [`IterableDataset`] progressively iterates over a dataset one example at a time, so you don't have to wait for the whole dataset to download before you can use it. As you can imagine, this is quite useful for large datasets you want to use immediately! However, this means an [`IterableDataset`]'s behavior is different from a regular [`Dataset`]. You don't get random access to examples in an [`IterableDataset`]. Instead, you should iterate over its elements, for example, by calling `next(iter())` or with a `for` loop to return the next item from the [`IterableDataset`]: ```py >>> next(iter(iterable_dataset)) {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F0681F59B50>, 'label': 6} >>> for example in iterable_dataset: ... print(example) ... 
break {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DE82B0>, 'label': 6} ``` You can return a subset of the dataset with a specific number of examples in it with [`IterableDataset.take`]: ```py # Get first three examples >>> list(iterable_dataset.take(3)) [{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F7479DEE9D0>, 'label': 6}, {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x512 at 0x7F7479DE8190>, 'label': 6}, {'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x383 at 0x7F7479DE8310>, 'label': 6}] ``` But unlike [slicing](access/#slicing), [`IterableDataset.take`] creates a new [`IterableDataset`]. ## Next steps Interested in learning more about the differences between these two types of datasets? Learn more about them in the [Differences between `Dataset` and `IterableDataset`](about_mapstyle_vs_iterable) conceptual guide. To get more hands-on with these dataset types, check out the [Process](process) guide to learn how to preprocess a [`Dataset`] or the [Stream](stream) guide to learn how to preprocess an [`IterableDataset`].
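`take` pairs naturally with its counterpart [`IterableDataset.skip`]. As a minimal sketch - the size of 1000 is purely illustrative - you can carve a held-out head off a streamed split:

```py
>>> from datasets import load_dataset

>>> iterable_dataset = load_dataset("food101", split="train", streaming=True)
>>> held_out = iterable_dataset.take(1000)   # first 1000 examples
>>> remainder = iterable_dataset.skip(1000)  # everything after the first 1000
```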
datasets/docs/source/access.mdx/0
{ "file_path": "datasets/docs/source/access.mdx", "repo_id": "datasets", "token_count": 2274 }
76
# Process image data This guide shows specific methods for processing image datasets. Learn how to: - Use [`~Dataset.map`] with an image dataset. - Apply data augmentations to a dataset with [`~Dataset.set_transform`]. For a guide on how to process any type of dataset, take a look at the <a class="underline decoration-sky-400 decoration-2 font-semibold" href="./process">general process guide</a>. ## Map The [`~Dataset.map`] function can apply transforms over an entire dataset. For example, create a basic [`Resize`](https://pytorch.org/vision/stable/generated/torchvision.transforms.Resize.html) function: ```py >>> def transforms(examples): ... examples["pixel_values"] = [image.convert("RGB").resize((100,100)) for image in examples["image"]] ... return examples ``` Now use the [`~Dataset.map`] function to resize the entire dataset, and set `batched=True` to speed up the process by accepting batches of examples. The transform returns `pixel_values` as a cacheable `PIL.Image` object: ```py >>> dataset = dataset.map(transforms, remove_columns=["image"], batched=True) >>> dataset[0] {'label': 6, 'pixel_values': <PIL.PngImagePlugin.PngImageFile image mode=RGB size=100x100 at 0x7F058237BB10>} ``` The cache file saves time because you don't have to execute the same transform twice. The [`~Dataset.map`] function is best for operations you only run once per training - like resizing an image - instead of using it for operations executed for each epoch, like data augmentations. [`~Dataset.map`] takes up some memory, but you can reduce its memory requirements with the following parameters: - [`batch_size`](./package_reference/main_classes#datasets.DatasetDict.map.batch_size) determines the number of examples that are processed in one call to the transform function. - [`writer_batch_size`](./package_reference/main_classes#datasets.DatasetDict.map.writer_batch_size) determines the number of processed examples that are kept in memory before they are stored away. Both parameter values default to 1000, which can be expensive if you are storing images. Lower these values to use less memory when you use [`~Dataset.map`]. ## Apply transforms 🤗 Datasets applies data augmentations from any library or package to your dataset. Transforms can be applied on-the-fly on batches of data with [`~Dataset.set_transform`], which consumes less disk space. <Tip> The following example uses [torchvision](https://pytorch.org/vision/stable/index.html), but feel free to use other data augmentation libraries like [Albumentations](https://albumentations.ai/docs/), [Kornia](https://kornia.readthedocs.io/en/latest/), and [imgaug](https://imgaug.readthedocs.io/en/latest/). </Tip> For example, if you'd like to change the color properties of an image randomly: ```py >>> from torchvision.transforms import Compose, ColorJitter, ToTensor >>> jitter = Compose( ... [ ... ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.7), ... ToTensor(), ... ] ... ) ``` Create a function to apply the `ColorJitter` transform: ```py >>> def transforms(examples): ... examples["pixel_values"] = [jitter(image.convert("RGB")) for image in examples["image"]] ... return examples ``` Apply the transform with the [`~Dataset.set_transform`] function: ```py >>> dataset.set_transform(transforms) ```
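As a quick check that the transform is applied lazily, access an example after calling [`~Dataset.set_transform`]; this is a sketch, and the exact tensor contents depend on your images:

```py
>>> example = dataset[0]  # the jitter + ToTensor pipeline runs on the fly here
>>> type(example["pixel_values"])
<class 'torch.Tensor'>
```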
datasets/docs/source/image_process.mdx/0
{ "file_path": "datasets/docs/source/image_process.mdx", "repo_id": "datasets", "token_count": 1031 }
77
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Quickstart [[open-in-colab]] This quickstart is intended for developers who are ready to dive into the code and see an example of how to integrate 🤗 Datasets into their model training workflow. If you're a beginner, we recommend starting with our [tutorials](./tutorial), where you'll get a more thorough introduction. Each dataset is unique, and depending on the task, some datasets may require additional steps to prepare them for training. But you can always use 🤗 Datasets tools to load and process a dataset. The fastest and easiest way to get started is by loading an existing dataset from the [Hugging Face Hub](https://huggingface.co/datasets). There are thousands of datasets to choose from, spanning many tasks. Choose the type of dataset you want to work with, and let's get started! <div class="mt-4"> <div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-3 md:gap-y-4 md:gap-x-5"> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#audio" ><div class="w-full text-center bg-gradient-to-r from-violet-300 via-sky-400 to-green-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Audio</div> <p class="text-gray-700">Resample an audio dataset and get it ready for a model to classify what type of banking issue a speaker is calling about.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#vision" ><div class="w-full text-center bg-gradient-to-r from-pink-400 via-purple-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Vision</div> <p class="text-gray-700">Apply data augmentation to an image dataset and get it ready for a model to diagnose disease in bean plants.</p> </a> <a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="#nlp" ><div class="w-full text-center bg-gradient-to-r from-orange-300 via-red-400 to-violet-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">NLP</div> <p class="text-gray-700">Tokenize a dataset and get it ready for a model to determine whether a pair of sentences have the same meaning.</p> </a> </div> </div> <Tip> Check out [Chapter 5](https://huggingface.co/course/chapter5/1?fw=pt) of the Hugging Face course to learn more about other important topics such as loading remote or local datasets, tools for cleaning up a dataset, and creating your own dataset. 
</Tip> Start by installing 🤗 Datasets: ```bash pip install datasets ``` 🤗 Datasets also supports audio and image data formats: * To work with audio datasets, install the [`Audio`] feature: ```bash pip install datasets[audio] ``` * To work with image datasets, install the [`Image`] feature: ```bash pip install datasets[vision] ``` Besides 🤗 Datasets, make sure your preferred machine learning framework is installed: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## Audio Audio datasets are loaded just like text datasets. However, an audio dataset is preprocessed a bit differently. Instead of a tokenizer, you'll need a [feature extractor](https://huggingface.co/docs/transformers/main_classes/feature_extractor#feature-extractor). An audio input may also require resampling its sampling rate to match the sampling rate of the pretrained model you're using. In this quickstart, you'll prepare the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset for a model to train on and classify the banking issue a customer is having. **1**. Load the MInDS-14 dataset by providing the [`load_dataset`] function with the dataset name, dataset configuration (not all datasets will have a configuration), and a dataset split: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", "en-US", split="train") ``` **2**. Next, load a pretrained [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model and its corresponding feature extractor from the [🤗 Transformers](https://huggingface.co/transformers/) library. It is totally normal to see a warning after you load the model about some weights not being initialized. This is expected because you are loading this model checkpoint for training with another task. ```py >>> from transformers import AutoModelForAudioClassification, AutoFeatureExtractor >>> model = AutoModelForAudioClassification.from_pretrained("facebook/wav2vec2-base") >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base") ``` **3**. The [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset card indicates the sampling rate is 8kHz, but the Wav2Vec2 model was pretrained on a sampling rate of 16kHz. You'll need to upsample the `audio` column with the [`~Dataset.cast_column`] function and [`Audio`] feature to match the model's sampling rate. ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) >>> dataset[0]["audio"] {'array': array([ 2.3443763e-05, 2.1729663e-04, 2.2145823e-04, ..., 3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32), 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav', 'sampling_rate': 16000} ``` **4**. Create a function to preprocess the audio `array` with the feature extractor, and truncate and pad the sequences into tidy rectangular tensors. The most important thing to remember is to call the audio `array` in the feature extractor since the `array` - the actual speech signal - is the model input. Once you have a preprocessing function, use the [`~Dataset.map`] function to speed up processing by applying the function to batches of examples in the dataset. ```py >>> def preprocess_function(examples): ... audio_arrays = [x["array"] for x in examples["audio"]] ... inputs = feature_extractor( ... audio_arrays, ... sampling_rate=16000, ... 
padding=True, ... max_length=100000, ... truncation=True, ... ) ... return inputs >>> dataset = dataset.map(preprocess_function, batched=True) ``` **5**. Use the [`~Dataset.rename_column`] function to rename the `intent_class` column to `labels`, which is the expected input name in [Wav2Vec2ForSequenceClassification](https://huggingface.co/docs/transformers/main/en/model_doc/wav2vec2#transformers.Wav2Vec2ForSequenceClassification): ```py >>> dataset = dataset.rename_column("intent_class", "labels") ``` **6**. Set the dataset format according to the machine learning framework you're using. <frameworkcontent> <pt> Use the [`~Dataset.set_format`] function to set the dataset format to `torch` and specify the columns you want to format. This function applies formatting on-the-fly. After converting to PyTorch tensors, wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader): ```py >>> from torch.utils.data import DataLoader >>> dataset.set_format(type="torch", columns=["input_values", "labels"]) >>> dataloader = DataLoader(dataset, batch_size=4) ``` </pt> <tf> Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification. ```py >>> import tensorflow as tf >>> tf_dataset = model.prepare_tf_dataset( ... dataset, ... batch_size=4, ... shuffle=True, ... ) ``` </tf> </frameworkcontent> **7**. Start training with your machine learning framework! Check out the 🤗 Transformers [audio classification guide](https://huggingface.co/docs/transformers/tasks/audio_classification) for an end-to-end example of how to train a model on an audio dataset. ## Vision Image datasets are loaded just like text datasets. However, instead of a tokenizer, you'll need a [feature extractor](https://huggingface.co/docs/transformers/main_classes/feature_extractor#feature-extractor) to preprocess the dataset. Applying data augmentation to an image is common in computer vision to make the model more robust against overfitting. You're free to use any data augmentation library you want, and then you can apply the augmentations with 🤗 Datasets. In this quickstart, you'll load the [Beans](https://huggingface.co/datasets/beans) dataset and get it ready for the model to train on and identify disease from the leaf images. **1**. Load the Beans dataset by providing the [`load_dataset`] function with the dataset name and a dataset split: ```py >>> from datasets import load_dataset, Image >>> dataset = load_dataset("beans", split="train") ``` Most image models work with RGB images. If your dataset contains images in a different mode, you can use the [`~Dataset.cast_column`] function to set the mode to RGB: ```py >>> dataset = dataset.cast_column("image", Image(mode="RGB")) ``` The Beans dataset contains only RGB images, so this step is unnecessary here. **2**. Now you can add some data augmentations with any library ([Albumentations](https://albumentations.ai/), [imgaug](https://imgaug.readthedocs.io/en/latest/), [Kornia](https://kornia.readthedocs.io/en/latest/)) you like. 
Here, you'll use [torchvision](https://pytorch.org/vision/stable/transforms.html) to randomly change the color properties of an image: ```py >>> from torchvision.transforms import Compose, ColorJitter, ToTensor >>> jitter = Compose( ... [ColorJitter(brightness=0.5, hue=0.5), ToTensor()] ... ) ``` **3**. Create a function to apply your transform to the dataset and generate the model input: `pixel_values`. ```python >>> def transforms(examples): ... examples["pixel_values"] = [jitter(image.convert("RGB")) for image in examples["image"]] ... return examples ``` **4**. Use the [`~Dataset.with_transform`] function to apply the data augmentations on-the-fly: ```py >>> dataset = dataset.with_transform(transforms) ``` **5**. Set the dataset format according to the machine learning framework you're using. <frameworkcontent> <pt> Wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader). You'll also need to create a collate function to collate the samples into batches: ```py >>> import torch >>> from torch.utils.data import DataLoader >>> def collate_fn(examples): ... images = [] ... labels = [] ... for example in examples: ... images.append((example["pixel_values"])) ... labels.append(example["labels"]) ... ... pixel_values = torch.stack(images) ... labels = torch.tensor(labels) ... return {"pixel_values": pixel_values, "labels": labels} >>> dataloader = DataLoader(dataset, collate_fn=collate_fn, batch_size=4) ``` </pt> <tf> Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification. Before you start, make sure you have up-to-date versions of `albumentations` and `cv2` installed: ```bash pip install -U albumentations opencv-python ``` ```py >>> import albumentations >>> import numpy as np >>> transform = albumentations.Compose([ ... albumentations.RandomCrop(width=256, height=256), ... albumentations.HorizontalFlip(p=0.5), ... albumentations.RandomBrightnessContrast(p=0.2), ... ]) >>> def transforms(examples): ... examples["pixel_values"] = [ ... transform(image=np.array(image))["image"] for image in examples["image"] ... ] ... return examples >>> dataset.set_transform(transforms) >>> tf_dataset = model.prepare_tf_dataset( ... dataset, ... batch_size=4, ... shuffle=True, ... ) ``` </tf> </frameworkcontent> **6**. Start training with your machine learning framework! Check out the 🤗 Transformers [image classification guide](https://huggingface.co/docs/transformers/tasks/image_classification) for an end-to-end example of how to train a model on an image dataset. ## NLP Text needs to be tokenized into individual tokens by a [tokenizer](https://huggingface.co/docs/transformers/main_classes/tokenizer). For the quickstart, you'll load the [Microsoft Research Paraphrase Corpus (MRPC)](https://huggingface.co/datasets/glue/viewer/mrpc) training dataset to train a model to determine whether a pair of sentences mean the same thing. **1**. 
Load the MRPC dataset by providing the [`load_dataset`] function with the dataset name, dataset configuration (not all datasets will have a configuration), and dataset split: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("glue", "mrpc", split="train") ``` **2**. Next, load a pretrained [BERT](https://huggingface.co/bert-base-uncased) model and its corresponding tokenizer from the [🤗 Transformers](https://huggingface.co/transformers/) library. It is totally normal to see a warning after you load the model about some weights not being initialized. This is expected because you are loading this model checkpoint for training with another task. ```py >>> from transformers import AutoModelForSequenceClassification, AutoTokenizer >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") ===PT-TF-SPLIT=== >>> from transformers import TFAutoModelForSequenceClassification, AutoTokenizer >>> model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased") >>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") ``` **3**. Create a function to tokenize the dataset; it should also truncate and pad the text into tidy rectangular tensors. The tokenizer generates three new columns in the dataset: `input_ids`, `token_type_ids`, and an `attention_mask`. These are the model inputs. Use the [`~Dataset.map`] function to speed up processing by applying your tokenization function to batches of examples in the dataset: ```py >>> def encode(examples): ... return tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, padding="max_length") >>> dataset = dataset.map(encode, batched=True) >>> dataset[0] {'sentence1': 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .', 'sentence2': 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .', 'label': 1, 'idx': 0, 'input_ids': array([ 101, 7277, 2180, 5303, 4806, 1117, 1711, 117, 2292, 1119, 1270, 107, 1103, 7737, 107, 117, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102, 11336, 6732, 3384, 1106, 1140, 1112, 1178, 107, 1103, 7737, 107, 117, 7277, 2180, 5303, 4806, 1117, 1711, 1104, 9938, 4267, 12223, 21811, 1117, 2554, 119, 102]), 'token_type_ids': array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]), 'attention_mask': array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])} ``` **4**. Rename the `label` column to `labels`, which is the expected input name in [BertForSequenceClassification](https://huggingface.co/docs/transformers/main/en/model_doc/bert#transformers.BertForSequenceClassification): ```py >>> dataset = dataset.map(lambda examples: {"labels": examples["label"]}, batched=True) ``` **5**. Set the dataset format according to the machine learning framework you're using. <frameworkcontent> <pt> Use the [`~Dataset.set_format`] function to set the dataset format to `torch` and specify the columns you want to format. This function applies formatting on-the-fly. 
After converting to PyTorch tensors, wrap the dataset in [`torch.utils.data.DataLoader`](https://alband.github.io/doc_view/data.html?highlight=torch%20utils%20data%20dataloader#torch.utils.data.DataLoader): ```py >>> import torch >>> dataset.set_format(type="torch", columns=["input_ids", "token_type_ids", "attention_mask", "labels"]) >>> dataloader = torch.utils.data.DataLoader(dataset, batch_size=32) ``` </pt> <tf> Use the [`~transformers.TFPreTrainedModel.prepare_tf_dataset`] method from 🤗 Transformers to prepare the dataset to be compatible with TensorFlow, and ready to train/fine-tune a model, as it wraps a HuggingFace [`~datasets.Dataset`] as a `tf.data.Dataset` with collation and batching, so one can pass it directly to Keras methods like `fit()` without further modification. ```py >>> import tensorflow as tf >>> tf_dataset = model.prepare_tf_dataset( ... dataset, ... batch_size=4, ... shuffle=True, ... ) ``` </tf> </frameworkcontent> **6**. Start training with your machine learning framework! Check out the 🤗 Transformers [text classification guide](https://huggingface.co/docs/transformers/tasks/sequence_classification) for an end-to-end example of how to train a model on a text dataset. A minimal training-loop sketch also follows at the end of this page. ## What's next? This completes the 🤗 Datasets quickstart! You can load any text, audio, or image dataset with a single function and get it ready for your model to train on. For your next steps, take a look at our [How-to guides](./how_to) and learn how to do more specific things like loading different dataset formats, aligning labels, and streaming large datasets. If you're interested in learning more about 🤗 Datasets core concepts, grab a cup of coffee and read our [Conceptual Guides](./about_arrow)!
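To round off the NLP example, here is a minimal PyTorch training-loop sketch; the `AdamW` optimizer and the `5e-5` learning rate are illustrative assumptions rather than recommendations from this guide, and `dataloader` is the one built in step **5**:

```py
>>> from torch.optim import AdamW

>>> optimizer = AdamW(model.parameters(), lr=5e-5)  # assumed hyperparameters
>>> model.train()
>>> for batch in dataloader:
...     outputs = model(**batch)  # the batch includes `labels`, so the model returns a loss
...     outputs.loss.backward()
...     optimizer.step()
...     optimizer.zero_grad()
```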
datasets/docs/source/quickstart.mdx/0
{ "file_path": "datasets/docs/source/quickstart.mdx", "repo_id": "datasets", "token_count": 6102 }
78
[tool.ruff] line-length = 119 [tool.ruff.lint] # Ignored rules: # "E501" -> line length violation # "F821" -> undefined name in type annotation (e.g. Literal["something"]) # "C901" -> `function_name` is too complex ignore = ["E501", "F821", "C901"] select = ["C", "E", "F", "I", "W"] [tool.ruff.lint.isort] lines-after-imports = 2 known-first-party = ["datasets"] [tool.ruff.lint.per-file-ignores] "__init__.py" = ["F401", "F403", "F405"] [tool.pytest.ini_options] # Test fails if a FutureWarning is thrown by `huggingface_hub` filterwarnings = [ "error::FutureWarning:huggingface_hub*", ] markers = [ "unit: unit test", "integration: integration test", ]
datasets/pyproject.toml/0
{ "file_path": "datasets/pyproject.toml", "repo_id": "datasets", "token_count": 274 }
79
import os import re from functools import partial from glob import has_magic from pathlib import Path, PurePath from typing import Callable, Dict, List, Optional, Set, Tuple, Union import huggingface_hub from fsspec.core import url_to_fs from fsspec.implementations.http import HTTPFileSystem from huggingface_hub import HfFileSystem from packaging import version from tqdm.contrib.concurrent import thread_map from . import config from .download import DownloadConfig from .naming import _split_re from .splits import Split from .utils import logging from .utils import tqdm as hf_tqdm from .utils.file_utils import _prepare_path_and_storage_options, is_local_path, is_relative_path, xbasename, xjoin from .utils.py_utils import glob_pattern_to_regex, string_to_dict SingleOriginMetadata = Union[Tuple[str, str], Tuple[str], Tuple[()]] SANITIZED_DEFAULT_SPLIT = str(Split.TRAIN) logger = logging.get_logger(__name__) class Url(str): pass class EmptyDatasetError(FileNotFoundError): pass SPLIT_PATTERN_SHARDED = "data/{split}-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*" SPLIT_KEYWORDS = { Split.TRAIN: ["train", "training"], Split.VALIDATION: ["validation", "valid", "dev", "val"], Split.TEST: ["test", "testing", "eval", "evaluation"], } NON_WORDS_CHARS = "-._ 0-9" if config.FSSPEC_VERSION < version.parse("2023.9.0"): KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"] KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [ "{keyword}/**", "{keyword}[{sep}]*/**", "**[{sep}/]{keyword}/**", "**[{sep}/]{keyword}[{sep}]*/**", ] elif config.FSSPEC_VERSION < version.parse("2023.12.0"): KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**/*[{sep}/]{keyword}[{sep}]*", "{keyword}[{sep}]*"] KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [ "{keyword}/**/*", "{keyword}[{sep}]*/**/*", "**/*[{sep}/]{keyword}/**/*", "**/*[{sep}/]{keyword}[{sep}]*/**/*", ] else: KEYWORDS_IN_FILENAME_BASE_PATTERNS = ["**/{keyword}[{sep}]*", "**/*[{sep}]{keyword}[{sep}]*"] KEYWORDS_IN_DIR_NAME_BASE_PATTERNS = [ "**/{keyword}/**", "**/{keyword}[{sep}]*/**", "**/*[{sep}]{keyword}/**", "**/*[{sep}]{keyword}[{sep}]*/**", ] DEFAULT_SPLITS = [Split.TRAIN, Split.VALIDATION, Split.TEST] DEFAULT_PATTERNS_SPLIT_IN_FILENAME = { split: [ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS) for keyword in SPLIT_KEYWORDS[split] for pattern in KEYWORDS_IN_FILENAME_BASE_PATTERNS ] for split in DEFAULT_SPLITS } DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME = { split: [ pattern.format(keyword=keyword, sep=NON_WORDS_CHARS) for keyword in SPLIT_KEYWORDS[split] for pattern in KEYWORDS_IN_DIR_NAME_BASE_PATTERNS ] for split in DEFAULT_SPLITS } DEFAULT_PATTERNS_ALL = { Split.TRAIN: ["**"], } ALL_SPLIT_PATTERNS = [SPLIT_PATTERN_SHARDED] ALL_DEFAULT_PATTERNS = [ DEFAULT_PATTERNS_SPLIT_IN_DIR_NAME, DEFAULT_PATTERNS_SPLIT_IN_FILENAME, DEFAULT_PATTERNS_ALL, ] if config.FSSPEC_VERSION < version.parse("2023.9.0"): METADATA_PATTERNS = [ "metadata.csv", "**/metadata.csv", "metadata.jsonl", "**/metadata.jsonl", ] # metadata file for ImageFolder and AudioFolder else: METADATA_PATTERNS = [ "**/metadata.csv", "**/metadata.jsonl", ] # metadata file for ImageFolder and AudioFolder WILDCARD_CHARACTERS = "*[]" FILES_TO_IGNORE = [ "README.md", "config.json", "dataset_info.json", "dataset_infos.json", "dummy_data.zip", "dataset_dict.json", ] def contains_wildcards(pattern: str) -> bool: return any(wildcard_character in pattern for wildcard_character in WILDCARD_CHARACTERS) def sanitize_patterns(patterns: Union[Dict, List, str]) -> Dict[str, Union[List[str], "DataFilesList"]]: """ 
Take the data_files patterns from the user, and format them into a dictionary. Each key is the name of the split, and each value is a list of data files patterns (paths or urls). The default split is "train". Returns: patterns: dictionary of split_name -> list of patterns """ if isinstance(patterns, dict): return {str(key): value if isinstance(value, list) else [value] for key, value in patterns.items()} elif isinstance(patterns, str): return {SANITIZED_DEFAULT_SPLIT: [patterns]} elif isinstance(patterns, list): if any(isinstance(pattern, dict) for pattern in patterns): for pattern in patterns: if not ( isinstance(pattern, dict) and len(pattern) == 2 and "split" in pattern and isinstance(pattern.get("path"), (str, list)) ): raise ValueError( f"Expected each split to have a 'path' key which can be a string or a list of strings, but got {pattern}" ) splits = [pattern["split"] for pattern in patterns] if len(set(splits)) != len(splits): raise ValueError(f"Some splits are duplicated in data_files: {splits}") return { str(pattern["split"]): pattern["path"] if isinstance(pattern["path"], list) else [pattern["path"]] for pattern in patterns } else: return {SANITIZED_DEFAULT_SPLIT: patterns} else: return sanitize_patterns(list(patterns)) def _is_inside_unrequested_special_dir(matched_rel_path: str, pattern: str) -> bool: """ When a path matches a pattern, we additionally check if it's inside a special directory we ignore by default (if it starts with a double underscore). Users can still explicitly request a filepath inside such a directory if "__pycache__" is mentioned explicitly in the requested pattern. Some examples: base directory: ./ └── __pycache__ └── b.txt >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "**") True >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "*/b.txt") True >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__pycache__/*") False >>> _is_inside_unrequested_special_dir("__pycache__/b.txt", "__*/*") False """ # We just need to check if every special directory from the path is present explicitly in the pattern. # Since we assume that the path matches the pattern, it's equivalent to checking that both # the parent path and the parent pattern have the same number of special directories. data_dirs_to_ignore_in_path = [part for part in PurePath(matched_rel_path).parent.parts if part.startswith("__")] data_dirs_to_ignore_in_pattern = [part for part in PurePath(pattern).parent.parts if part.startswith("__")] return len(data_dirs_to_ignore_in_path) != len(data_dirs_to_ignore_in_pattern) def _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(matched_rel_path: str, pattern: str) -> bool: """ When a path matches a pattern, we additionally check if it's a hidden file or if it's inside a hidden directory we ignore by default, i.e. if the file name or a parent directory name starts with a dot. Users can still explicitly request a filepath that is hidden or is inside a hidden directory if the hidden part is mentioned explicitly in the requested pattern. 
Some examples: base directory: ./ └── .hidden_file.txt >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", "**") True >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_file.txt", ".*") False base directory: ./ └── .hidden_dir └── a.txt >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", "**") True >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".*/*") False >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/a.txt", ".hidden_dir/*") False base directory: ./ └── .hidden_dir └── .hidden_file.txt >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", "**") True >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/*") True >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".*/.*") False >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/*") True >>> _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(".hidden_dir/.hidden_file.txt", ".hidden_dir/.*") False """ # We just need to check if every hidden part from the path is present explicitly in the pattern. # Since we assume that the path matches the pattern, it's equivalent to checking that both # the path and the pattern have the same number of hidden parts. hidden_directories_in_path = [ part for part in PurePath(matched_rel_path).parts if part.startswith(".") and not set(part) == {"."} ] hidden_directories_in_pattern = [ part for part in PurePath(pattern).parts if part.startswith(".") and not set(part) == {"."} ] return len(hidden_directories_in_path) != len(hidden_directories_in_pattern) def _get_data_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> Dict[str, List[str]]: """ Get the default pattern from a directory or repository by testing all the supported patterns. The first pattern to return a non-empty list of data files is returned. In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS. 
""" # first check the split patterns like data/{split}-00000-of-00001.parquet for split_pattern in ALL_SPLIT_PATTERNS: pattern = split_pattern.replace("{split}", "*") try: data_files = pattern_resolver(pattern) except FileNotFoundError: continue if len(data_files) > 0: splits: Set[str] = { string_to_dict(xbasename(p), glob_pattern_to_regex(xbasename(split_pattern)))["split"] for p in data_files } if any(not re.match(_split_re, split) for split in splits): raise ValueError(f"Split name should match '{_split_re}'' but got '{splits}'.") sorted_splits = [str(split) for split in DEFAULT_SPLITS if split in splits] + sorted( splits - set(DEFAULT_SPLITS) ) return {split: [split_pattern.format(split=split)] for split in sorted_splits} # then check the default patterns based on train/valid/test splits for patterns_dict in ALL_DEFAULT_PATTERNS: non_empty_splits = [] for split, patterns in patterns_dict.items(): for pattern in patterns: try: data_files = pattern_resolver(pattern) except FileNotFoundError: continue if len(data_files) > 0: non_empty_splits.append(split) break if non_empty_splits: return {split: patterns_dict[split] for split in non_empty_splits} raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}") def _get_metadata_files_patterns(pattern_resolver: Callable[[str], List[str]]) -> List[str]: """ Get the supported metadata patterns from a directory or repository. """ non_empty_patterns = [] for pattern in METADATA_PATTERNS: try: metadata_files = pattern_resolver(pattern) if len(metadata_files) > 0: non_empty_patterns.append(pattern) except FileNotFoundError: pass if non_empty_patterns: return non_empty_patterns raise FileNotFoundError(f"Couldn't resolve pattern {pattern} with resolver {pattern_resolver}") def resolve_pattern( pattern: str, base_path: str, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> List[str]: """ Resolve the paths and URLs of the data files from the pattern passed by the user. You can use patterns to resolve multiple local files. Here are a few examples: - *.csv to match all the CSV files at the first level - **.csv to match all the CSV files at any level - data/* to match all the files inside "data" - data/** to match all the files inside "data" and its subdirectories The patterns are resolved using the fsspec glob. In fsspec>=2023.12.0 this is equivalent to Python's glob.glob, Path.glob, Path.match and fnmatch where ** is unsupported with a prefix/suffix other than a forward slash /. More generally: - '*' matches any character except a forward-slash (to match just the file or directory name) - '**' matches any character including a forward-slash / Hidden files and directories (i.e. whose names start with a dot) are ignored, unless they are explicitly requested. The same applies to special directories that start with a double underscore like "__pycache__". You can still include one if the pattern explicilty mentions it: - to include a hidden file: "*/.hidden.txt" or "*/.*" - to include a hidden directory: ".hidden/*" or ".*/*" - to include a special directory: "__special__/*" or "__*/*" Example:: >>> from datasets.data_files import resolve_pattern >>> base_path = "." >>> resolve_pattern("docs/**/*.py", base_path) [/Users/mariosasko/Desktop/projects/datasets/docs/source/_config.py'] Args: pattern (str): Unix pattern or paths or URLs of the data files to resolve. The paths can be absolute or relative to base_path. Remote filesystems using fsspec are supported, e.g. 
with the hf:// protocol. base_path (str): Base path to use when resolving relative paths. allowed_extensions (Optional[list], optional): White-list of file extensions to use. Defaults to None (all extensions). For example: allowed_extensions=[".csv", ".json", ".txt", ".parquet"] download_config ([`DownloadConfig`], *optional*): Specific download configuration parameters. Returns: List[str]: List of paths or URLs to the local or remote files that match the patterns. """ if is_relative_path(pattern): pattern = xjoin(base_path, pattern) elif is_local_path(pattern): base_path = os.path.splitdrive(pattern)[0] + os.sep else: base_path = "" pattern, storage_options = _prepare_path_and_storage_options(pattern, download_config=download_config) fs, fs_pattern = url_to_fs(pattern, **storage_options) files_to_ignore = set(FILES_TO_IGNORE) - {xbasename(pattern)} protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0] protocol_prefix = protocol + "://" if protocol != "file" else "" glob_kwargs = {} if protocol == "hf" and config.HF_HUB_VERSION >= version.parse("0.20.0"): # 10 times faster glob with detail=True (ignores costly info like lastCommit) glob_kwargs["expand_info"] = False matched_paths = [ filepath if filepath.startswith(protocol_prefix) else protocol_prefix + filepath for filepath, info in fs.glob(pattern, detail=True, **glob_kwargs).items() if info["type"] == "file" and (xbasename(filepath) not in files_to_ignore) and not _is_inside_unrequested_special_dir(filepath, fs_pattern) and not _is_unrequested_hidden_file_or_is_inside_unrequested_hidden_dir(filepath, fs_pattern) ] # ignore .ipynb and __pycache__, but keep /../ if allowed_extensions is not None: out = [ filepath for filepath in matched_paths if any("." + suffix in allowed_extensions for suffix in xbasename(filepath).split(".")[1:]) ] if len(out) < len(matched_paths): invalid_matched_files = list(set(matched_paths) - set(out)) logger.info( f"Some files matched the pattern '{pattern}' but don't have valid data file extensions: {invalid_matched_files}" ) else: out = matched_paths if not out: error_msg = f"Unable to find '{pattern}'" if allowed_extensions is not None: error_msg += f" with any supported extension {list(allowed_extensions)}" raise FileNotFoundError(error_msg) return out def get_data_patterns(base_path: str, download_config: Optional[DownloadConfig] = None) -> Dict[str, List[str]]: """ Get the default pattern from a directory by testing all the supported patterns. The first pattern to return a non-empty list of data files is returned. 
Some examples of supported patterns: Input: my_dataset_repository/ ├── README.md └── dataset.csv Output: {'train': ['**']} Input: my_dataset_repository/ ├── README.md ├── train.csv └── test.csv my_dataset_repository/ ├── README.md └── data/ ├── train.csv └── test.csv my_dataset_repository/ ├── README.md ├── train_0.csv ├── train_1.csv ├── train_2.csv ├── train_3.csv ├── test_0.csv └── test_1.csv Output: {'train': ['**/train[-._ 0-9]*', '**/*[-._ 0-9]train[-._ 0-9]*', '**/training[-._ 0-9]*', '**/*[-._ 0-9]training[-._ 0-9]*'], 'test': ['**/test[-._ 0-9]*', '**/*[-._ 0-9]test[-._ 0-9]*', '**/testing[-._ 0-9]*', '**/*[-._ 0-9]testing[-._ 0-9]*', ...]} Input: my_dataset_repository/ ├── README.md └── data/ ├── train/ │ ├── shard_0.csv │ ├── shard_1.csv │ ├── shard_2.csv │ └── shard_3.csv └── test/ ├── shard_0.csv └── shard_1.csv Output: {'train': ['**/train/**', '**/train[-._ 0-9]*/**', '**/*[-._ 0-9]train/**', '**/*[-._ 0-9]train[-._ 0-9]*/**', ...], 'test': ['**/test/**', '**/test[-._ 0-9]*/**', '**/*[-._ 0-9]test/**', '**/*[-._ 0-9]test[-._ 0-9]*/**', ...]} Input: my_dataset_repository/ ├── README.md └── data/ ├── train-00000-of-00003.csv ├── train-00001-of-00003.csv ├── train-00002-of-00003.csv ├── test-00000-of-00001.csv ├── random-00000-of-00003.csv ├── random-00001-of-00003.csv └── random-00002-of-00003.csv Output: {'train': ['data/train-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'], 'test': ['data/test-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*'], 'random': ['data/random-[0-9][0-9][0-9][0-9][0-9]-of-[0-9][0-9][0-9][0-9][0-9]*.*']} In order, it first tests if SPLIT_PATTERN_SHARDED works, otherwise it tests the patterns in ALL_DEFAULT_PATTERNS. """ resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config) try: return _get_data_files_patterns(resolver) except FileNotFoundError: raise EmptyDatasetError(f"The directory at {base_path} doesn't contain any data files") from None def get_metadata_patterns( base_path: str, download_config: Optional[DownloadConfig] = None, ) -> List[str]: """ Get the supported metadata patterns from a local directory. 
""" resolver = partial(resolve_pattern, base_path=base_path, download_config=download_config) try: return _get_metadata_files_patterns(resolver) except FileNotFoundError: raise FileNotFoundError(f"The directory at {base_path} doesn't contain any metadata file") from None def _get_single_origin_metadata( data_file: str, download_config: Optional[DownloadConfig] = None, ) -> SingleOriginMetadata: data_file, storage_options = _prepare_path_and_storage_options(data_file, download_config=download_config) fs, *_ = url_to_fs(data_file, **storage_options) if isinstance(fs, HfFileSystem): resolved_path = fs.resolve_path(data_file) return resolved_path.repo_id, resolved_path.revision elif isinstance(fs, HTTPFileSystem) and data_file.startswith(config.HF_ENDPOINT): hffs = HfFileSystem(endpoint=config.HF_ENDPOINT, token=download_config.token) data_file = "hf://" + data_file[len(config.HF_ENDPOINT) + 1 :].replace("/resolve/", "@", 1) resolved_path = hffs.resolve_path(data_file) return resolved_path.repo_id, resolved_path.revision info = fs.info(data_file) # s3fs uses "ETag", gcsfs uses "etag", and for local we simply check mtime for key in ["ETag", "etag", "mtime"]: if key in info: return (str(info[key]),) return () def _get_origin_metadata( data_files: List[str], download_config: Optional[DownloadConfig] = None, max_workers: Optional[int] = None, ) -> List[SingleOriginMetadata]: max_workers = max_workers if max_workers is not None else config.HF_DATASETS_MULTITHREADING_MAX_WORKERS return thread_map( partial(_get_single_origin_metadata, download_config=download_config), data_files, max_workers=max_workers, tqdm_class=hf_tqdm, desc="Resolving data files", # set `disable=None` rather than `disable=False` by default to disable progress bar when no TTY attached disable=len(data_files) <= 16 or None, ) class DataFilesList(List[str]): """ List of data files (absolute local paths or URLs). It has two construction methods given the user's data files patterns: - ``from_hf_repo``: resolve patterns inside a dataset repository - ``from_local_or_remote``: resolve patterns from a local path Moreover, DataFilesList has an additional attribute ``origin_metadata``. It can store: - the last modified time of local files - ETag of remote files - commit sha of a dataset repository Thanks to this additional attribute, it is possible to hash the list and get a different hash if and only if at least one file changed. This is useful for caching Dataset objects that are obtained from a list of data files. 
""" def __init__(self, data_files: List[str], origin_metadata: List[SingleOriginMetadata]) -> None: super().__init__(data_files) self.origin_metadata = origin_metadata def __add__(self, other: "DataFilesList") -> "DataFilesList": return DataFilesList([*self, *other], self.origin_metadata + other.origin_metadata) @classmethod def from_hf_repo( cls, patterns: List[str], dataset_info: huggingface_hub.hf_api.DatasetInfo, base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = f"hf://datasets/{dataset_info.id}@{dataset_info.sha}/{base_path or ''}".rstrip("/") return cls.from_patterns( patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config ) @classmethod def from_local_or_remote( cls, patterns: List[str], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None else Path().resolve().as_posix() return cls.from_patterns( patterns, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config ) @classmethod def from_patterns( cls, patterns: List[str], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None else Path().resolve().as_posix() data_files = [] for pattern in patterns: try: data_files.extend( resolve_pattern( pattern, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) ) except FileNotFoundError: if not has_magic(pattern): raise origin_metadata = _get_origin_metadata(data_files, download_config=download_config) return cls(data_files, origin_metadata) def filter_extensions(self, extensions: List[str]) -> "DataFilesList": pattern = "|".join("\\" + ext for ext in extensions) pattern = re.compile(f".*({pattern})(\\..+)?$") return DataFilesList( [data_file for data_file in self if pattern.match(data_file)], origin_metadata=self.origin_metadata, ) class DataFilesDict(Dict[str, DataFilesList]): """ Dict of split_name -> list of data files (absolute local paths or URLs). It has two construction methods given the user's data files patterns : - ``from_hf_repo``: resolve patterns inside a dataset repository - ``from_local_or_remote``: resolve patterns from a local path Moreover, each list is a DataFilesList. It is possible to hash the dictionary and get a different hash if and only if at least one file changed. For more info, see [`DataFilesList`]. This is useful for caching Dataset objects that are obtained from a list of data files. Changing the order of the keys of this dictionary also doesn't change its hash. 
""" @classmethod def from_local_or_remote( cls, patterns: Dict[str, Union[List[str], DataFilesList]], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = cls() for key, patterns_for_key in patterns.items(): out[key] = ( patterns_for_key if isinstance(patterns_for_key, DataFilesList) else DataFilesList.from_local_or_remote( patterns_for_key, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) ) return out @classmethod def from_hf_repo( cls, patterns: Dict[str, Union[List[str], DataFilesList]], dataset_info: huggingface_hub.hf_api.DatasetInfo, base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = cls() for key, patterns_for_key in patterns.items(): out[key] = ( patterns_for_key if isinstance(patterns_for_key, DataFilesList) else DataFilesList.from_hf_repo( patterns_for_key, dataset_info=dataset_info, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) ) return out @classmethod def from_patterns( cls, patterns: Dict[str, Union[List[str], DataFilesList]], base_path: Optional[str] = None, allowed_extensions: Optional[List[str]] = None, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = cls() for key, patterns_for_key in patterns.items(): out[key] = ( patterns_for_key if isinstance(patterns_for_key, DataFilesList) else DataFilesList.from_patterns( patterns_for_key, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) ) return out def filter_extensions(self, extensions: List[str]) -> "DataFilesDict": out = type(self)() for key, data_files_list in self.items(): out[key] = data_files_list.filter_extensions(extensions) return out class DataFilesPatternsList(List[str]): """ List of data files patterns (absolute local paths or URLs). For each pattern there should also be a list of allowed extensions to keep, or a None ot keep all the files for the pattern. 
""" def __init__( self, patterns: List[str], allowed_extensions: List[Optional[List[str]]], ): super().__init__(patterns) self.allowed_extensions = allowed_extensions def __add__(self, other): return DataFilesList([*self, *other], self.allowed_extensions + other.allowed_extensions) @classmethod def from_patterns( cls, patterns: List[str], allowed_extensions: Optional[List[str]] = None ) -> "DataFilesPatternsList": return cls(patterns, [allowed_extensions] * len(patterns)) def resolve( self, base_path: str, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesList": base_path = base_path if base_path is not None else Path().resolve().as_posix() data_files = [] for pattern, allowed_extensions in zip(self, self.allowed_extensions): try: data_files.extend( resolve_pattern( pattern, base_path=base_path, allowed_extensions=allowed_extensions, download_config=download_config, ) ) except FileNotFoundError: if not has_magic(pattern): raise origin_metadata = _get_origin_metadata(data_files, download_config=download_config) return DataFilesList(data_files, origin_metadata) def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsList": return DataFilesPatternsList( self, [allowed_extensions + extensions for allowed_extensions in self.allowed_extensions] ) class DataFilesPatternsDict(Dict[str, DataFilesPatternsList]): """ Dict of split_name -> list of data files patterns (absolute local paths or URLs). """ @classmethod def from_patterns( cls, patterns: Dict[str, List[str]], allowed_extensions: Optional[List[str]] = None ) -> "DataFilesPatternsDict": out = cls() for key, patterns_for_key in patterns.items(): out[key] = ( patterns_for_key if isinstance(patterns_for_key, DataFilesPatternsList) else DataFilesPatternsList.from_patterns( patterns_for_key, allowed_extensions=allowed_extensions, ) ) return out def resolve( self, base_path: str, download_config: Optional[DownloadConfig] = None, ) -> "DataFilesDict": out = DataFilesDict() for key, data_files_patterns_list in self.items(): out[key] = data_files_patterns_list.resolve(base_path, download_config) return out def filter_extensions(self, extensions: List[str]) -> "DataFilesPatternsDict": out = type(self)() for key, data_files_patterns_list in self.items(): out[key] = data_files_patterns_list.filter_extensions(extensions) return out
datasets/src/datasets/data_files.py/0
{ "file_path": "datasets/src/datasets/data_files.py", "repo_id": "datasets", "token_count": 13790 }
80
# Copyright 2020 The HuggingFace Datasets Authors and the TensorFlow Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter logger = logging.get_logger(__name__) _FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {} _FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {} _FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {} def _register_formatter( formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None, ): """ Register a Formatter object using a name and optional aliases. This function must be used on a Formatter class. """ aliases = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})" ) _FORMAT_TYPES[format_type] = formatter_cls for alias in set(aliases + [format_type]): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})" ) _FORMAT_TYPES_ALIASES[alias] = format_type def _register_unavailable_formatter( unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None ): """ Register an unavailable Formatter object using a name and optional aliases. This function must be used on an Exception object that is raised when trying to get the unavailable formatter. 
""" aliases = aliases if aliases is not None else [] for alias in set(aliases + [format_type]): _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=["python"]) _register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"]) _register_formatter(NumpyFormatter, "numpy", aliases=["np"]) _register_formatter(PandasFormatter, "pandas", aliases=["pd"]) _register_formatter(CustomFormatter, "custom") if config.POLARS_AVAILABLE: from .polars_formatter import PolarsFormatter _register_formatter(PolarsFormatter, "polars", aliases=["pl"]) else: _polars_error = ValueError("Polars needs to be installed to be able to return Polars dataframes.") _register_unavailable_formatter(_polars_error, "polars", aliases=["pl"]) if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"]) else: _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.") _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"]) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, "tensorflow", aliases=["tf"]) else: _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.") _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"]) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, "jax", aliases=[]) else: _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.") _register_unavailable_formatter(_jax_error, "jax", aliases=[]) def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]: """If the given format type is a known alias, then return its main type name. Otherwise return the type with no change.""" if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter: """ Factory function to get a Formatter given its type name and keyword arguments. A formatter is an object that extracts and formats data from pyarrow table. It defines the formatting for rows, colums and batches. If the formatter for a given type name doesn't exist or is not available, an error is raised. """ format_type = get_format_type_from_alias(format_type) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**format_kwargs) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError(f"Format type should be one of {list(_FORMAT_TYPES.keys())}, but got '{format_type}'")
datasets/src/datasets/formatting/__init__.py/0
{ "file_path": "datasets/src/datasets/formatting/__init__.py", "repo_id": "datasets", "token_count": 1892 }
81
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame.

    When caching, cache materialization is parallelized over Spark; an NFS that is accessible to the driver must be
    provided. Streaming is not currently supported.
    """

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        working_dir: Optional[str] = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
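A hypothetical end-to-end sketch (it needs a live Spark session; the toy DataFrame is made up). In practice this reader is the plumbing underneath `Dataset.from_spark`:

from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("foo", 0), ("bar", 1)], schema="text string, label int")

# streaming=False materializes the DataFrame into an Arrow-backed Dataset,
# writing the cache files in parallel across the Spark executors.
reader = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/hf_spark_cache")
ds = reader.read()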
datasets/src/datasets/io/spark.py/0
{ "file_path": "datasets/src/datasets/io/spark.py", "repo_id": "datasets", "token_count": 787 }
82
from typing import Callable


def is_documented_by(function_with_docstring: Callable):
    """Decorator that copies the docstring of `function_with_docstring` onto the decorated function,
    so documentation can be shared across related functions.

    Args:
        function_with_docstring (`Callable`): The function whose docstring should be reused.
    """

    def wrapper(target_function):
        target_function.__doc__ = function_with_docstring.__doc__
        return target_function

    return wrapper
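For example (both functions below are illustrative):

def load_items(path):
    """Load items from `path` and return them as a list."""
    ...

@is_documented_by(load_items)
def load_items_lazy(path):
    ...

assert load_items_lazy.__doc__ == load_items.__doc__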
datasets/src/datasets/utils/doc_utils.py/0
{ "file_path": "datasets/src/datasets/utils/doc_utils.py", "repo_id": "datasets", "token_count": 137 }
83
[ "unknown", "n<1K", "1K<n<10K", "10K<n<100K", "100K<n<1M", "1M<n<10M", "10M<n<100M", "100M<n<1B", "1B<n<10B", "10B<n<100B", "100B<n<1T", "n>1T" ]
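These labels are the size buckets used in dataset card metadata. A hypothetical helper showing how a row count maps onto them (the function name is made up):

def size_category(n: int) -> str:
    bounds = [
        (1_000, "n<1K"),
        (10_000, "1K<n<10K"),
        (100_000, "10K<n<100K"),
        (1_000_000, "100K<n<1M"),
        (10_000_000, "1M<n<10M"),
        (100_000_000, "10M<n<100M"),
        (1_000_000_000, "100M<n<1B"),
        (10_000_000_000, "1B<n<10B"),
        (100_000_000_000, "10B<n<100B"),
        (1_000_000_000_000, "100B<n<1T"),
    ]
    for upper, label in bounds:
        if n < upper:
            return label
    return "n>1T"

assert size_category(2_500) == "1K<n<10K"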
datasets/src/datasets/utils/resources/size_categories.json/0
{ "file_path": "datasets/src/datasets/utils/resources/size_categories.json", "repo_id": "datasets", "token_count": 124 }
84
import os
from collections import namedtuple

import pytest

from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict


_TestCommandArgs = namedtuple(
    "_TestCommandArgs",
    [
        "dataset",
        "name",
        "cache_dir",
        "data_dir",
        "all_configs",
        "save_infos",
        "ignore_verifications",
        "force_redownload",
        "clear_cache",
        "num_proc",
        "trust_remote_code",
    ],
    defaults=[None, None, None, False, False, False, False, False, None, None],
)


def is_1percent_close(source, target):
    return (abs(source - target) / target) < 0.01


@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(
        dataset=dataset_loading_script_dir, all_configs=True, save_infos=True, trust_remote_code=True
    )
    test_command = TestCommand(*args)
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(dataset_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
        {
            "default": DatasetInfo(
                features=Features(
                    {
                        "tokens": Sequence(Value("string")),
                        "ner_tags": Sequence(
                            ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"])
                        ),
                        "langs": Sequence(Value("string")),
                        "spans": Sequence(Value("string")),
                    }
                ),
                splits=[
                    {
                        "name": "train",
                        "num_bytes": 2351563,
                        "num_examples": 10000,
                    },
                    {
                        "name": "validation",
                        "num_bytes": 238418,
                        "num_examples": 1000,
                    },
                ],
                download_size=3940680,
                dataset_size=2589981,
            )
        }
    )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_1percent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_1percent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
datasets/tests/commands/test_test.py/0
{ "file_path": "datasets/tests/commands/test_test.py", "repo_id": "datasets", "token_count": 1546 }
85
import contextlib import csv import json import os import sqlite3 import tarfile import textwrap import zipfile import pandas as pd import pyarrow as pa import pyarrow.parquet as pq import pytest import datasets import datasets.config # dataset + arrow_file @pytest.fixture(scope="session") def dataset(): n = 10 features = datasets.Features( { "tokens": datasets.Sequence(datasets.Value("string")), "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])), "answers": datasets.Sequence( { "text": datasets.Value("string"), "answer_start": datasets.Value("int32"), } ), "id": datasets.Value("int64"), } ) dataset = datasets.Dataset.from_dict( { "tokens": [["foo"] * 5] * n, "labels": [[1] * 5] * n, "answers": [{"answer_start": [97], "text": ["1976"]}] * 10, "id": list(range(n)), }, features=features, ) return dataset @pytest.fixture(scope="session") def arrow_file(tmp_path_factory, dataset): filename = str(tmp_path_factory.mktemp("data") / "file.arrow") dataset.map(cache_file_name=filename) return filename # FILE_CONTENT + files FILE_CONTENT = """\ Text data. Second line of data.""" @pytest.fixture(scope="session") def text_file_content(): return FILE_CONTENT @pytest.fixture(scope="session") def text_file(tmp_path_factory): filename = tmp_path_factory.mktemp("data") / "file.txt" data = FILE_CONTENT with open(filename, "w") as f: f.write(data) return filename @pytest.fixture(scope="session") def bz2_file(tmp_path_factory): import bz2 path = tmp_path_factory.mktemp("data") / "file.txt.bz2" data = bytes(FILE_CONTENT, "utf-8") with bz2.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def gz_file(tmp_path_factory): import gzip path = str(tmp_path_factory.mktemp("data") / "file.txt.gz") data = bytes(FILE_CONTENT, "utf-8") with gzip.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def lz4_file(tmp_path_factory): if datasets.config.LZ4_AVAILABLE: import lz4.frame path = tmp_path_factory.mktemp("data") / "file.txt.lz4" data = bytes(FILE_CONTENT, "utf-8") with lz4.frame.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def seven_zip_file(tmp_path_factory, text_file): if datasets.config.PY7ZR_AVAILABLE: import py7zr path = tmp_path_factory.mktemp("data") / "file.txt.7z" with py7zr.SevenZipFile(path, "w") as archive: archive.write(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def tar_file(tmp_path_factory, text_file): import tarfile path = tmp_path_factory.mktemp("data") / "file.txt.tar" with tarfile.TarFile(path, "w") as f: f.add(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def xz_file(tmp_path_factory): import lzma path = tmp_path_factory.mktemp("data") / "file.txt.xz" data = bytes(FILE_CONTENT, "utf-8") with lzma.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def zip_file(tmp_path_factory, text_file): import zipfile path = tmp_path_factory.mktemp("data") / "file.txt.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_file, arcname=os.path.basename(text_file)) return path @pytest.fixture(scope="session") def zstd_file(tmp_path_factory): if datasets.config.ZSTANDARD_AVAILABLE: import zstandard as zstd path = tmp_path_factory.mktemp("data") / "file.txt.zst" data = bytes(FILE_CONTENT, "utf-8") with zstd.open(path, "wb") as f: f.write(data) return path # xml_file @pytest.fixture(scope="session") def xml_file(tmp_path_factory): filename = 
tmp_path_factory.mktemp("data") / "file.xml" data = textwrap.dedent( """\ <?xml version="1.0" encoding="UTF-8" ?> <tmx version="1.4"> <header segtype="sentence" srclang="ca" /> <body> <tu> <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv> <tuv xml:lang="en"><seg>Content 1</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv> <tuv xml:lang="en"><seg>Content 2</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv> <tuv xml:lang="en"><seg>Content 3</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv> <tuv xml:lang="en"><seg>Content 4</seg></tuv> </tu> <tu> <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv> <tuv xml:lang="en"><seg>Content 5</seg></tuv> </tu> </body> </tmx>""" ) with open(filename, "w") as f: f.write(data) return filename DATA = [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] DATA2 = [ {"col_1": "4", "col_2": 4, "col_3": 4.0}, {"col_1": "5", "col_2": 5, "col_3": 5.0}, ] DATA_DICT_OF_LISTS = { "col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0], } DATA_312 = [ {"col_3": 0.0, "col_1": "0", "col_2": 0}, {"col_3": 1.0, "col_1": "1", "col_2": 1}, ] DATA_STR = [ {"col_1": "s0", "col_2": 0, "col_3": 0.0}, {"col_1": "s1", "col_2": 1, "col_3": 1.0}, {"col_1": "s2", "col_2": 2, "col_3": 2.0}, {"col_1": "s3", "col_2": 3, "col_3": 3.0}, ] @pytest.fixture(scope="session") def dataset_dict(): return DATA_DICT_OF_LISTS @pytest.fixture(scope="session") def arrow_path(tmp_path_factory): dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS) path = str(tmp_path_factory.mktemp("data") / "dataset.arrow") dataset.map(cache_file_name=path) return path @pytest.fixture(scope="session") def sqlite_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite") with contextlib.closing(sqlite3.connect(path)) as con: cur = con.cursor() cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)") for item in DATA: cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values())) con.commit() return path @pytest.fixture(scope="session") def csv_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.csv") with open(path, "w", newline="") as f: writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"]) writer.writeheader() for item in DATA: writer.writerow(item) return path @pytest.fixture(scope="session") def csv2_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset2.csv") with open(path, "w", newline="") as f: writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"]) writer.writeheader() for item in DATA: writer.writerow(item) return path @pytest.fixture(scope="session") def bz2_csv_path(csv_path, tmp_path_factory): import bz2 path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2" with open(csv_path, "rb") as f: data = f.read() # data = bytes(FILE_CONTENT, "utf-8") with bz2.open(path, "wb") as f: f.write(data) return path @pytest.fixture(scope="session") def zip_csv_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("zip_csv_path") / "csv-dataset.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, arcname=os.path.basename(csv_path)) f.write(csv2_path, arcname=os.path.basename(csv2_path)) return path @pytest.fixture(scope="session") def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / 
"dataset.csv.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV"))) f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV"))) return path @pytest.fixture(scope="session") def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip" with zipfile.ZipFile(path, "w") as f: f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path))) f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path))) return path @pytest.fixture(scope="session") def parquet_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.parquet") schema = pa.schema( { "col_1": pa.string(), "col_2": pa.int64(), "col_3": pa.float64(), } ) with open(path, "wb") as f: writer = pq.ParquetWriter(f, schema=schema) pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema) writer.write_table(pa_table) writer.close() return path @pytest.fixture(scope="session") def geoparquet_path(tmp_path_factory): df = pd.read_parquet(path="https://github.com/opengeospatial/geoparquet/raw/v1.0.0/examples/example.parquet") path = str(tmp_path_factory.mktemp("data") / "dataset.geoparquet") df.to_parquet(path=path) return path @pytest.fixture(scope="session") def json_list_of_dicts_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.json") data = {"data": DATA} with open(path, "w") as f: json.dump(data, f) return path @pytest.fixture(scope="session") def json_dict_of_lists_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.json") data = {"data": DATA_DICT_OF_LISTS} with open(path, "w") as f: json.dump(data, f) return path @pytest.fixture(scope="session") def jsonl_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl") with open(path, "w") as f: for item in DATA: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl2_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl") with open(path, "w") as f: for item in DATA: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl_312_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl") with open(path, "w") as f: for item in DATA_312: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def jsonl_str_path(tmp_path_factory): path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl") with open(path, "w") as f: for item in DATA_STR: f.write(json.dumps(item) + "\n") return path @pytest.fixture(scope="session") def text_gz_path(tmp_path_factory, text_path): import gzip path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz") with open(text_path, "rb") as orig_file: with gzip.open(path, "wb") as zipped_file: zipped_file.writelines(orig_file) return path @pytest.fixture(scope="session") def jsonl_gz_path(tmp_path_factory, jsonl_path): import gzip path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz") with open(jsonl_path, "rb") as orig_file: with gzip.open(path, "wb") as zipped_file: zipped_file.writelines(orig_file) return path @pytest.fixture(scope="session") def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(jsonl_path, arcname=os.path.basename(jsonl_path)) 
f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path)) return path @pytest.fixture(scope="session") def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path))) return path @pytest.fixture(scope="session") def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip" with zipfile.ZipFile(path, "w") as f: f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path))) f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path))) return path @pytest.fixture(scope="session") def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar" with tarfile.TarFile(path, "w") as f: f.add(jsonl_path, arcname=os.path.basename(jsonl_path)) f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path)) return path @pytest.fixture(scope="session") def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar" with tarfile.TarFile(path, "w") as f: f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path))) return path @pytest.fixture(scope="session") def text_path(tmp_path_factory): data = ["0", "1", "2", "3"] path = str(tmp_path_factory.mktemp("data") / "dataset.txt") with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def text2_path(tmp_path_factory): data = ["0", "1", "2", "3"] path = str(tmp_path_factory.mktemp("data") / "dataset2.txt") with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def text_dir(tmp_path_factory): data = ["0", "1", "2", "3"] path = tmp_path_factory.mktemp("data_text_dir") / "dataset.txt" with open(path, "w") as f: for item in data: f.write(item + "\n") return path.parent @pytest.fixture(scope="session") def text_dir_with_unsupported_extension(tmp_path_factory): data = ["0", "1", "2", "3"] path = tmp_path_factory.mktemp("data") / "dataset.abc" with open(path, "w") as f: for item in data: f.write(item + "\n") return path @pytest.fixture(scope="session") def zip_text_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.text.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.basename(text_path)) f.write(text2_path, arcname=os.path.basename(text2_path)) return path @pytest.fixture(scope="session") def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path))) f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path))) return path @pytest.fixture(scope="session") def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.ext.zip" with zipfile.ZipFile(path, "w") as f: f.write(text_path, arcname=os.path.basename("unsupported.ext")) f.write(text2_path, arcname=os.path.basename("unsupported_2.ext")) return path @pytest.fixture(scope="session") def text_path_with_unicode_new_lines(tmp_path_factory): text = 
"\n".join(["First", "Second\u2029with Unicode new line", "Third"]) path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt") with open(path, "w", encoding="utf-8") as f: f.write(text) return path @pytest.fixture(scope="session") def image_file(): return os.path.join("tests", "features", "data", "test_image_rgb.jpg") @pytest.fixture(scope="session") def audio_file(): return os.path.join("tests", "features", "data", "test_audio_44100.wav") @pytest.fixture(scope="session") def tensor_file(tmp_path_factory): import torch path = tmp_path_factory.mktemp("data") / "tensor.pth" with open(path, "wb") as f: torch.save(torch.ones(128), f) return path @pytest.fixture(scope="session") def zip_image_path(image_file, tmp_path_factory): path = tmp_path_factory.mktemp("data") / "dataset.img.zip" with zipfile.ZipFile(path, "w") as f: f.write(image_file, arcname=os.path.basename(image_file)) f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg")) return path @pytest.fixture(scope="session") def data_dir_with_hidden_files(tmp_path_factory): data_dir = tmp_path_factory.mktemp("data_dir") (data_dir / "subdir").mkdir() with open(data_dir / "subdir" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "subdir" / "test.txt", "w") as f: f.write("bar\n" * 10) # hidden file with open(data_dir / "subdir" / ".test.txt", "w") as f: f.write("bar\n" * 10) # hidden directory (data_dir / ".subdir").mkdir() with open(data_dir / ".subdir" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / ".subdir" / "test.txt", "w") as f: f.write("bar\n" * 10) return data_dir
datasets/tests/fixtures/files.py/0
{ "file_path": "datasets/tests/fixtures/files.py", "repo_id": "datasets", "token_count": 8339 }
86
import os import textwrap import pyarrow as pa import pytest from datasets import ClassLabel, Features, Image from datasets.builder import InvalidConfigName from datasets.data_files import DataFilesList from datasets.packaged_modules.csv.csv import Csv, CsvConfig from ..utils import require_pil @pytest.fixture def csv_file(tmp_path): filename = tmp_path / "file.csv" data = textwrap.dedent( """\ header1,header2 1,2 10,20 """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def malformed_csv_file(tmp_path): filename = tmp_path / "malformed_file.csv" data = textwrap.dedent( """\ header1,header2 1,2 10,20, """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def csv_file_with_image(tmp_path, image_file): filename = tmp_path / "csv_with_image.csv" data = textwrap.dedent( f"""\ image {image_file} """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def csv_file_with_label(tmp_path): filename = tmp_path / "csv_with_label.csv" data = textwrap.dedent( """\ label good bad good """ ) with open(filename, "w") as f: f.write(data) return str(filename) @pytest.fixture def csv_file_with_int_list(tmp_path): filename = tmp_path / "csv_with_int_list.csv" data = textwrap.dedent( """\ int_list 1 2 3 4 5 6 7 8 9 """ ) with open(filename, "w") as f: f.write(data) return str(filename) def test_config_raises_when_invalid_name() -> None: with pytest.raises(InvalidConfigName, match="Bad characters"): _ = CsvConfig(name="name-with-*-invalid-character") @pytest.mark.parametrize("data_files", ["str_path", ["str_path"], DataFilesList(["str_path"], [()])]) def test_config_raises_when_invalid_data_files(data_files) -> None: with pytest.raises(ValueError, match="Expected a DataFilesDict"): _ = CsvConfig(name="name", data_files=data_files) def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog): csv = Csv() generator = csv._generate_tables([[csv_file, malformed_csv_file]]) with pytest.raises(ValueError, match="Error tokenizing data"): for _ in generator: pass assert any( record.levelname == "ERROR" and "Failed to read file" in record.message and os.path.basename(malformed_csv_file) in record.message for record in caplog.records ) @require_pil def test_csv_cast_image(csv_file_with_image): with open(csv_file_with_image, encoding="utf-8") as f: image_file = f.read().splitlines()[1] csv = Csv(encoding="utf-8", features=Features({"image": Image()})) generator = csv._generate_tables([[csv_file_with_image]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.schema.field("image").type == Image()() generated_content = pa_table.to_pydict()["image"] assert generated_content == [{"path": image_file, "bytes": None}] def test_csv_cast_label(csv_file_with_label): with open(csv_file_with_label, encoding="utf-8") as f: labels = f.read().splitlines()[1:] csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])})) generator = csv._generate_tables([[csv_file_with_label]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])() generated_content = pa_table.to_pydict()["label"] assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels] def test_csv_convert_int_list(csv_file_with_int_list): csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]}) generator = 
csv._generate_tables([[csv_file_with_int_list]]) pa_table = pa.concat_tables([table for _, table in generator]) assert pa.types.is_list(pa_table.schema.field("int_list").type) generated_content = pa_table.to_pydict()["int_list"] assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
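The converters test works because the Csv packaged module forwards its keyword arguments to `pandas.read_csv`, so the same converter behaves identically in plain pandas (a standalone sketch; the inline StringIO data is made up to mirror the fixture):

import pandas as pd
from io import StringIO

frame = pd.read_csv(
    StringIO("int_list\n1 2 3\n4 5 6\n"),
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
assert frame["int_list"].tolist() == [[1, 2, 3], [4, 5, 6]]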
datasets/tests/packaged_modules/test_csv.py/0
{ "file_path": "datasets/tests/packaged_modules/test_csv.py", "repo_id": "datasets", "token_count": 1880 }
87
from unittest import TestCase from datasets import Sequence, Value from datasets.arrow_dataset import Dataset class DatasetListTest(TestCase): def _create_example_records(self): return [ {"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}, {"col_1": 1, "col_2": "c"}, {"col_1": 0, "col_2": "d"}, ] def _create_example_dict(self): data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} return Dataset.from_dict(data) def test_create(self): example_records = self._create_example_records() dset = Dataset.from_list(example_records) self.assertListEqual(dset.column_names, ["col_1", "col_2"]) for i, r in enumerate(dset): self.assertDictEqual(r, example_records[i]) def test_list_dict_equivalent(self): example_records = self._create_example_records() dset = Dataset.from_list(example_records) dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]}) self.assertEqual(dset.info, dset_from_dict.info) def test_uneven_records(self): # checks what happens with missing columns uneven_records = [{"col_1": 1}, {"col_2": "x"}] dset = Dataset.from_list(uneven_records) self.assertDictEqual(dset[0], {"col_1": 1}) self.assertDictEqual(dset[1], {"col_1": None}) # NB: first record is used for columns def test_variable_list_records(self): # checks if the type can be inferred from the second record list_records = [{"col_1": []}, {"col_1": [1, 2]}] dset = Dataset.from_list(list_records) self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64"))) def test_create_empty(self): dset = Dataset.from_list([]) self.assertEqual(len(dset), 0) self.assertListEqual(dset.column_names, [])
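The list/dict equivalence asserted above can be reproduced directly with the public API:

from datasets import Dataset

records = [{"col_1": 3, "col_2": "a"}, {"col_1": 2, "col_2": "b"}]
ds_from_list = Dataset.from_list(records)
ds_from_dict = Dataset.from_dict({"col_1": [3, 2], "col_2": ["a", "b"]})
assert ds_from_list.to_dict() == ds_from_dict.to_dict()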
datasets/tests/test_dataset_list.py/0
{ "file_path": "datasets/tests/test_dataset_list.py", "repo_id": "datasets", "token_count": 875 }
88
import importlib import os import pickle import shutil import tempfile import time from hashlib import sha256 from multiprocessing import Pool from pathlib import Path from unittest import TestCase from unittest.mock import patch import dill import pyarrow as pa import pytest import requests import datasets from datasets import config, load_dataset, load_from_disk from datasets.arrow_dataset import Dataset from datasets.arrow_writer import ArrowWriter from datasets.builder import DatasetBuilder from datasets.config import METADATA_CONFIGS_FIELD from datasets.data_files import DataFilesDict, DataFilesPatternsDict from datasets.dataset_dict import DatasetDict, IterableDatasetDict from datasets.download.download_config import DownloadConfig from datasets.exceptions import DatasetNotFoundError from datasets.features import Features, Image, Value from datasets.iterable_dataset import IterableDataset from datasets.load import ( CachedDatasetModuleFactory, HubDatasetModuleFactoryWithoutScript, HubDatasetModuleFactoryWithParquetExport, HubDatasetModuleFactoryWithScript, LocalDatasetModuleFactoryWithoutScript, LocalDatasetModuleFactoryWithScript, PackagedDatasetModuleFactory, infer_module_for_data_files_list, infer_module_for_data_files_list_in_archives, load_dataset_builder, resolve_trust_remote_code, ) from datasets.packaged_modules.audiofolder.audiofolder import AudioFolder, AudioFolderConfig from datasets.packaged_modules.imagefolder.imagefolder import ImageFolder, ImageFolderConfig from datasets.packaged_modules.parquet.parquet import ParquetConfig from datasets.utils import _dataset_viewer from datasets.utils.logging import INFO, get_logger from .utils import ( OfflineSimulationMode, assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, offline, require_moto, require_not_windows, require_pil, require_sndfile, set_current_working_directory_to_temp_dir, ) DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__" DATASET_LOADING_SCRIPT_CODE = """ import os import datasets from datasets import DatasetInfo, Features, Split, SplitGenerator, Value class __DummyDataset1__(datasets.GeneratorBasedBuilder): def _info(self) -> DatasetInfo: return DatasetInfo(features=Features({"text": Value("string")})) def _split_generators(self, dl_manager): return [ SplitGenerator(Split.TRAIN, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "train.txt")}), SplitGenerator(Split.TEST, gen_kwargs={"filepath": os.path.join(dl_manager.manual_dir, "test.txt")}), ] def _generate_examples(self, filepath, **kwargs): with open(filepath, "r", encoding="utf-8") as f: for i, line in enumerate(f): yield i, {"text": line.strip()} """ SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script and also a parquet export SAMPLE_DATASET_IDENTIFIER2 = "hf-internal-testing/dataset_with_data_files" # only has data files SAMPLE_DATASET_IDENTIFIER3 = "hf-internal-testing/multi_dir_dataset" # has multiple data directories SAMPLE_DATASET_IDENTIFIER4 = "hf-internal-testing/imagefolder_with_metadata" # imagefolder with a metadata file outside of the train/test directories SAMPLE_DATASET_IDENTIFIER5 = "hf-internal-testing/imagefolder_with_metadata_no_splits" # imagefolder with a metadata file and no default split names in data files SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER = "hf-internal-testing/_dummy" SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST = "_dummy" SAMPLE_DATASET_NO_CONFIGS_IN_METADATA = "hf-internal-testing/audiofolder_no_configs_in_metadata" SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA = 
"hf-internal-testing/audiofolder_single_config_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA = "hf-internal-testing/audiofolder_two_configs_in_metadata" SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT = ( "hf-internal-testing/audiofolder_two_configs_in_metadata_with_default" ) SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME = "hf-internal-testing/DatasetWithCapitalLetters" @pytest.fixture def data_dir(tmp_path): data_dir = tmp_path / "data_dir" data_dir.mkdir() with open(data_dir / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "test.txt", "w") as f: f.write("bar\n" * 10) return str(data_dir) @pytest.fixture def data_dir_with_arrow(tmp_path): data_dir = tmp_path / "data_dir" data_dir.mkdir() output_train = os.path.join(data_dir, "train.arrow") with ArrowWriter(path=output_train) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["foo"] * 10})) num_examples, num_bytes = writer.finalize() assert num_examples == 10 assert num_bytes > 0 output_test = os.path.join(data_dir, "test.arrow") with ArrowWriter(path=output_test) as writer: writer.write_table(pa.Table.from_pydict({"col_1": ["bar"] * 10})) num_examples, num_bytes = writer.finalize() assert num_examples == 10 assert num_bytes > 0 return str(data_dir) @pytest.fixture def data_dir_with_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_metadata" data_dir.mkdir() with open(data_dir / "train.jpg", "wb") as f: f.write(b"train_image_bytes") with open(data_dir / "test.jpg", "wb") as f: f.write(b"test_image_bytes") with open(data_dir / "metadata.jsonl", "w") as f: f.write( """\ {"file_name": "train.jpg", "caption": "Cool tran image"} {"file_name": "test.jpg", "caption": "Cool test image"} """ ) return str(data_dir) @pytest.fixture def data_dir_with_single_config_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_one_default_config_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom drop_labels: true --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_config_and_data_files(tmp_path): data_dir = tmp_path / "data_dir_with_config_and_data_files" cats_data_dir = data_dir / "data" / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "data" / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: custom data_files: "data/**/*.jpg" --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_two_config_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") with open(data_dir / "README.md", "w") as f: f.write( f"""\ --- {METADATA_CONFIGS_FIELD}: - config_name: "v1" drop_labels: true default: true - 
config_name: "v2" drop_labels: false --- """ ) return str(data_dir) @pytest.fixture def data_dir_with_data_dir_configs_in_metadata(tmp_path): data_dir = tmp_path / "data_dir_with_two_configs_in_metadata" cats_data_dir = data_dir / "cats" cats_data_dir.mkdir(parents=True) dogs_data_dir = data_dir / "dogs" dogs_data_dir.mkdir(parents=True) with open(cats_data_dir / "cat.jpg", "wb") as f: f.write(b"this_is_a_cat_image_bytes") with open(dogs_data_dir / "dog.jpg", "wb") as f: f.write(b"this_is_a_dog_image_bytes") @pytest.fixture def sub_data_dirs(tmp_path): data_dir2 = tmp_path / "data_dir2" relative_subdir1 = "subdir1" sub_data_dir1 = data_dir2 / relative_subdir1 sub_data_dir1.mkdir(parents=True) with open(sub_data_dir1 / "train.txt", "w") as f: f.write("foo\n" * 10) with open(sub_data_dir1 / "test.txt", "w") as f: f.write("bar\n" * 10) relative_subdir2 = "subdir2" sub_data_dir2 = tmp_path / data_dir2 / relative_subdir2 sub_data_dir2.mkdir(parents=True) with open(sub_data_dir2 / "train.txt", "w") as f: f.write("foo\n" * 10) with open(sub_data_dir2 / "test.txt", "w") as f: f.write("bar\n" * 10) return str(data_dir2), relative_subdir1 @pytest.fixture def complex_data_dir(tmp_path): data_dir = tmp_path / "complex_data_dir" data_dir.mkdir() (data_dir / "data").mkdir() with open(data_dir / "data" / "train.txt", "w") as f: f.write("foo\n" * 10) with open(data_dir / "data" / "test.txt", "w") as f: f.write("bar\n" * 10) with open(data_dir / "README.md", "w") as f: f.write("This is a readme") with open(data_dir / ".dummy", "w") as f: f.write("this is a dummy file that is not a data file") return str(data_dir) @pytest.fixture def dataset_loading_script_dir(tmp_path): script_name = DATASET_LOADING_SCRIPT_NAME script_dir = tmp_path / script_name script_dir.mkdir() script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(DATASET_LOADING_SCRIPT_CODE) return str(script_dir) @pytest.fixture def dataset_loading_script_dir_readonly(tmp_path): script_name = DATASET_LOADING_SCRIPT_NAME script_dir = tmp_path / "readonly" / script_name script_dir.mkdir(parents=True) script_path = script_dir / f"{script_name}.py" with open(script_path, "w") as f: f.write(DATASET_LOADING_SCRIPT_CODE) dataset_loading_script_dir = str(script_dir) # Make this directory readonly os.chmod(dataset_loading_script_dir, 0o555) os.chmod(os.path.join(dataset_loading_script_dir, f"{script_name}.py"), 0o555) return dataset_loading_script_dir @pytest.mark.parametrize( "data_files, expected_module, expected_builder_kwargs", [ (["train.csv"], "csv", {}), (["train.tsv"], "csv", {"sep": "\t"}), (["train.json"], "json", {}), (["train.jsonl"], "json", {}), (["train.parquet"], "parquet", {}), (["train.geoparquet"], "parquet", {}), (["train.gpq"], "parquet", {}), (["train.arrow"], "arrow", {}), (["train.txt"], "text", {}), (["uppercase.TXT"], "text", {}), (["unsupported.ext"], None, {}), ([""], None, {}), ], ) def test_infer_module_for_data_files(data_files, expected_module, expected_builder_kwargs): module, builder_kwargs = infer_module_for_data_files_list(data_files) assert module == expected_module assert builder_kwargs == expected_builder_kwargs @pytest.mark.parametrize( "data_file, expected_module", [ ("zip_csv_path", "csv"), ("zip_csv_with_dir_path", "csv"), ("zip_uppercase_csv_path", "csv"), ("zip_unsupported_ext_path", None), ], ) def test_infer_module_for_data_files_in_archives( data_file, expected_module, zip_csv_path, zip_csv_with_dir_path, zip_uppercase_csv_path, zip_unsupported_ext_path ): data_file_paths = { 
"zip_csv_path": zip_csv_path, "zip_csv_with_dir_path": zip_csv_with_dir_path, "zip_uppercase_csv_path": zip_uppercase_csv_path, "zip_unsupported_ext_path": zip_unsupported_ext_path, } data_files = [str(data_file_paths[data_file])] inferred_module, _ = infer_module_for_data_files_list_in_archives(data_files) assert inferred_module == expected_module class ModuleFactoryTest(TestCase): @pytest.fixture(autouse=True) def inject_fixtures( self, jsonl_path, data_dir, data_dir_with_metadata, data_dir_with_single_config_in_metadata, data_dir_with_config_and_data_files, data_dir_with_two_config_in_metadata, sub_data_dirs, dataset_loading_script_dir, ): self._jsonl_path = jsonl_path self._data_dir = data_dir self._data_dir_with_metadata = data_dir_with_metadata self._data_dir_with_single_config_in_metadata = data_dir_with_single_config_in_metadata self._data_dir_with_config_and_data_files = data_dir_with_config_and_data_files self._data_dir_with_two_config_in_metadata = data_dir_with_two_config_in_metadata self._data_dir2 = sub_data_dirs[0] self._sub_data_dir = sub_data_dirs[1] self._dataset_loading_script_dir = dataset_loading_script_dir def setUp(self): self.hf_modules_cache = tempfile.mkdtemp() self.cache_dir = tempfile.mkdtemp() self.download_config = DownloadConfig(cache_dir=self.cache_dir) self.dynamic_modules_path = datasets.load.init_dynamic_modules( name="test_datasets_modules_" + os.path.basename(self.hf_modules_cache), hf_modules_cache=self.hf_modules_cache, ) def test_HubDatasetModuleFactoryWithScript_dont_trust_remote_code(self): factory = HubDatasetModuleFactoryWithScript( "hf-internal-testing/dataset_with_script", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, ) with patch.object(config, "HF_DATASETS_TRUST_REMOTE_CODE", None): # this will be the default soon self.assertRaises(ValueError, factory.get_module) factory = HubDatasetModuleFactoryWithScript( "hf-internal-testing/dataset_with_script", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=False, ) self.assertRaises(ValueError, factory.get_module) def test_HubDatasetModuleFactoryWithScript_with_hub_dataset(self): # "wmt_t2t" has additional imports (internal) factory = HubDatasetModuleFactoryWithScript( "wmt_t2t", download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, revision="861aac88b2c6247dd93ade8b1c189ce714627750", trust_remote_code=True, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT) def test_LocalDatasetModuleFactoryWithScript(self): path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=True, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) def test_LocalDatasetModuleFactoryWithScript_dont_trust_remote_code(self): path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py") factory = LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path ) self.assertRaises(ValueError, factory.get_module) factory = 
LocalDatasetModuleFactoryWithScript( path, download_config=self.download_config, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=False, ) self.assertRaises(ValueError, factory.get_module) def test_LocalDatasetModuleFactoryWithoutScript(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None assert os.path.isdir(module_factory_result.builder_kwargs["base_path"]) def test_LocalDatasetModuleFactoryWithoutScript_with_data_dir(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir2, data_dir=self._sub_data_dir) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) == 1 and len(builder_config.data_files["test"]) == 1 ) assert all( self._sub_data_dir in Path(data_file).parts for data_file in builder_config.data_files["train"] + builder_config.data_files["test"] ) def test_LocalDatasetModuleFactoryWithoutScript_with_metadata(self): factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir_with_metadata) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None builder_config = module_factory_result.builder_configs_parameters.builder_configs[0] assert ( builder_config.data_files is not None and len(builder_config.data_files["train"]) > 0 and len(builder_config.data_files["test"]) > 0 ) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"]) assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["test"]) def test_LocalDatasetModuleFactoryWithoutScript_with_single_config_in_metadata(self): factory = LocalDatasetModuleFactoryWithoutScript( self._data_dir_with_single_config_in_metadata, ) module_factory_result = factory.get_module() assert importlib.import_module(module_factory_result.module_path) is not None module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs assert module_metadata_configs is not None assert len(module_metadata_configs) == 1 assert next(iter(module_metadata_configs)) == "custom" assert "drop_labels" in next(iter(module_metadata_configs.values())) assert next(iter(module_metadata_configs.values()))["drop_labels"] is True module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs assert module_builder_configs is not None assert len(module_builder_configs) == 1 assert isinstance(module_builder_configs[0], ImageFolderConfig) assert module_builder_configs[0].name == "custom" assert module_builder_configs[0].data_files is not None assert isinstance(module_builder_configs[0].data_files, DataFilesPatternsDict) module_builder_configs[0]._resolve_data_files(self._data_dir_with_single_config_in_metadata, DownloadConfig()) assert isinstance(module_builder_configs[0].data_files, DataFilesDict) assert len(module_builder_configs[0].data_files) == 1 # one train split assert len(module_builder_configs[0].data_files["train"]) == 2 # two files assert module_builder_configs[0].drop_labels is True # parameter is passed from metadata # config named "default" is automatically considered to be a default config assert 
module_factory_result.builder_configs_parameters.default_config_name == "custom"
        # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly
        assert "drop_labels" not in module_factory_result.builder_kwargs

    def test_LocalDatasetModuleFactoryWithoutScript_with_config_and_data_files(self):
        factory = LocalDatasetModuleFactoryWithoutScript(
            self._data_dir_with_config_and_data_files,
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None

        module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
        builder_kwargs = module_factory_result.builder_kwargs
        assert module_metadata_configs is not None
        assert len(module_metadata_configs) == 1
        assert next(iter(module_metadata_configs)) == "custom"
        assert "data_files" in next(iter(module_metadata_configs.values()))
        assert next(iter(module_metadata_configs.values()))["data_files"] == "data/**/*.jpg"
        assert "data_files" not in builder_kwargs

    def test_LocalDatasetModuleFactoryWithoutScript_data_dir_with_config_and_data_files(self):
        factory = LocalDatasetModuleFactoryWithoutScript(self._data_dir_with_config_and_data_files, data_dir="data")
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None

        module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
        builder_kwargs = module_factory_result.builder_kwargs
        assert module_metadata_configs is not None
        assert len(module_metadata_configs) == 1
        assert next(iter(module_metadata_configs)) == "custom"
        assert "data_files" in next(iter(module_metadata_configs.values()))
        assert next(iter(module_metadata_configs.values()))["data_files"] == "data/**/*.jpg"
        assert "data_files" in builder_kwargs
        assert "train" in builder_kwargs["data_files"]
        assert len(builder_kwargs["data_files"]["train"]) == 2

    def test_LocalDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self):
        factory = LocalDatasetModuleFactoryWithoutScript(
            self._data_dir_with_two_config_in_metadata,
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None

        module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
        assert module_metadata_configs is not None
        assert len(module_metadata_configs) == 2
        assert list(module_metadata_configs) == ["v1", "v2"]
        assert "drop_labels" in module_metadata_configs["v1"]
        assert module_metadata_configs["v1"]["drop_labels"] is True
        assert "drop_labels" in module_metadata_configs["v2"]
        assert module_metadata_configs["v2"]["drop_labels"] is False

        module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs
        assert module_builder_configs is not None
        assert len(module_builder_configs) == 2
        module_builder_config_v1, module_builder_config_v2 = module_builder_configs
        assert module_builder_config_v1.name == "v1"
        assert module_builder_config_v2.name == "v2"
        assert isinstance(module_builder_config_v1, ImageFolderConfig)
        assert isinstance(module_builder_config_v2, ImageFolderConfig)
        assert isinstance(module_builder_config_v1.data_files, DataFilesPatternsDict)
        assert isinstance(module_builder_config_v2.data_files, DataFilesPatternsDict)
        module_builder_config_v1._resolve_data_files(self._data_dir_with_two_config_in_metadata, DownloadConfig())
        module_builder_config_v2._resolve_data_files(self._data_dir_with_two_config_in_metadata, DownloadConfig())
        assert isinstance(module_builder_config_v1.data_files, DataFilesDict)
        assert isinstance(module_builder_config_v2.data_files, DataFilesDict)
        assert sorted(module_builder_config_v1.data_files) == ["train"]
        assert len(module_builder_config_v1.data_files["train"]) == 2
        assert sorted(module_builder_config_v2.data_files) == ["train"]
        assert len(module_builder_config_v2.data_files["train"]) == 2
        assert module_builder_config_v1.drop_labels is True  # parameter is passed from metadata
        assert module_builder_config_v2.drop_labels is False  # parameter is passed from metadata

        assert (
            module_factory_result.builder_configs_parameters.default_config_name == "v1"
        )  # it's marked as a default one in yaml

        # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly
        assert "drop_labels" not in module_factory_result.builder_kwargs

    def test_PackagedDatasetModuleFactory(self):
        factory = PackagedDatasetModuleFactory(
            "json", data_files=self._jsonl_path, download_config=self.download_config
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None

    def test_PackagedDatasetModuleFactory_with_data_dir(self):
        factory = PackagedDatasetModuleFactory("json", data_dir=self._data_dir, download_config=self.download_config)
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None
        data_files = module_factory_result.builder_kwargs.get("data_files")
        assert data_files is not None and len(data_files["train"]) > 0 and len(data_files["test"]) > 0
        assert Path(data_files["train"][0]).parent.samefile(self._data_dir)
        assert Path(data_files["test"][0]).parent.samefile(self._data_dir)

    def test_PackagedDatasetModuleFactory_with_data_dir_and_metadata(self):
        factory = PackagedDatasetModuleFactory(
            "imagefolder", data_dir=self._data_dir_with_metadata, download_config=self.download_config
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None
        data_files = module_factory_result.builder_kwargs.get("data_files")
        assert data_files is not None and len(data_files["train"]) > 0 and len(data_files["test"]) > 0
        assert Path(data_files["train"][0]).parent.samefile(self._data_dir_with_metadata)
        assert Path(data_files["test"][0]).parent.samefile(self._data_dir_with_metadata)
        assert any(Path(data_file).name == "metadata.jsonl" for data_file in data_files["train"])
        assert any(Path(data_file).name == "metadata.jsonl" for data_file in data_files["test"])

    @pytest.mark.integration
    def test_HubDatasetModuleFactoryWithoutScript(self):
        factory = HubDatasetModuleFactoryWithoutScript(
            SAMPLE_DATASET_IDENTIFIER2, download_config=self.download_config
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None
        assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)

    @pytest.mark.integration
    def test_HubDatasetModuleFactoryWithoutScript_with_data_dir(self):
        data_dir = "data2"
        factory = HubDatasetModuleFactoryWithoutScript(
            SAMPLE_DATASET_IDENTIFIER3, data_dir=data_dir, download_config=self.download_config
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None
        builder_config = module_factory_result.builder_configs_parameters.builder_configs[0]
        assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
        assert (
            builder_config.data_files is not None
            and len(builder_config.data_files["train"]) == 1
            and len(builder_config.data_files["test"]) == 1
        )
        assert all(
            data_dir in Path(data_file).parts
            for data_file in builder_config.data_files["train"] + builder_config.data_files["test"]
        )

    @pytest.mark.integration
    def test_HubDatasetModuleFactoryWithoutScript_with_metadata(self):
        factory = HubDatasetModuleFactoryWithoutScript(
            SAMPLE_DATASET_IDENTIFIER4, download_config=self.download_config
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None
        builder_config = module_factory_result.builder_configs_parameters.builder_configs[0]
        assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
        assert (
            builder_config.data_files is not None
            and len(builder_config.data_files["train"]) > 0
            and len(builder_config.data_files["test"]) > 0
        )
        assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"])
        assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["test"])

        factory = HubDatasetModuleFactoryWithoutScript(
            SAMPLE_DATASET_IDENTIFIER5, download_config=self.download_config
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None
        builder_config = module_factory_result.builder_configs_parameters.builder_configs[0]
        assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)
        assert (
            builder_config.data_files is not None
            and len(builder_config.data_files) == 1
            and len(builder_config.data_files["train"]) > 0
        )
        assert any(Path(data_file).name == "metadata.jsonl" for data_file in builder_config.data_files["train"])

    @pytest.mark.integration
    def test_HubDatasetModuleFactoryWithoutScript_with_one_default_config_in_metadata(self):
        factory = HubDatasetModuleFactoryWithoutScript(
            SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA,
            download_config=self.download_config,
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None
        assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)

        module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
        assert module_metadata_configs is not None
        assert len(module_metadata_configs) == 1
        assert next(iter(module_metadata_configs)) == "custom"
        assert "drop_labels" in next(iter(module_metadata_configs.values()))
        assert next(iter(module_metadata_configs.values()))["drop_labels"] is True

        module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs
        assert module_builder_configs is not None
        assert len(module_builder_configs) == 1
        assert isinstance(module_builder_configs[0], AudioFolderConfig)
        assert module_builder_configs[0].name == "custom"
        assert module_builder_configs[0].data_files is not None
        assert isinstance(module_builder_configs[0].data_files, DataFilesPatternsDict)
        module_builder_configs[0]._resolve_data_files(
            module_factory_result.builder_kwargs["base_path"], DownloadConfig()
        )
        assert isinstance(module_builder_configs[0].data_files, DataFilesDict)
        assert sorted(module_builder_configs[0].data_files) == ["test", "train"]
        assert len(module_builder_configs[0].data_files["train"]) == 3
        assert len(module_builder_configs[0].data_files["test"]) == 3
        assert module_builder_configs[0].drop_labels is True  # parameter is passed from metadata

        # config named "default" is automatically considered to be a default config
        assert module_factory_result.builder_configs_parameters.default_config_name == "custom"

        # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly
        assert "drop_labels" not in module_factory_result.builder_kwargs

    @pytest.mark.integration
    def test_HubDatasetModuleFactoryWithoutScript_with_two_configs_in_metadata(self):
        datasets_names = [SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT]
        for dataset_name in datasets_names:
            factory = HubDatasetModuleFactoryWithoutScript(dataset_name, download_config=self.download_config)
            module_factory_result = factory.get_module()
            assert importlib.import_module(module_factory_result.module_path) is not None

            module_metadata_configs = module_factory_result.builder_configs_parameters.metadata_configs
            assert module_metadata_configs is not None
            assert len(module_metadata_configs) == 2
            assert list(module_metadata_configs) == ["v1", "v2"]
            assert "drop_labels" in module_metadata_configs["v1"]
            assert module_metadata_configs["v1"]["drop_labels"] is True
            assert "drop_labels" in module_metadata_configs["v2"]
            assert module_metadata_configs["v2"]["drop_labels"] is False

            module_builder_configs = module_factory_result.builder_configs_parameters.builder_configs
            assert module_builder_configs is not None
            assert len(module_builder_configs) == 2
            module_builder_config_v1, module_builder_config_v2 = module_builder_configs
            assert module_builder_config_v1.name == "v1"
            assert module_builder_config_v2.name == "v2"
            assert isinstance(module_builder_config_v1, AudioFolderConfig)
            assert isinstance(module_builder_config_v2, AudioFolderConfig)
            assert isinstance(module_builder_config_v1.data_files, DataFilesPatternsDict)
            assert isinstance(module_builder_config_v2.data_files, DataFilesPatternsDict)
            module_builder_config_v1._resolve_data_files(
                module_factory_result.builder_kwargs["base_path"], DownloadConfig()
            )
            module_builder_config_v2._resolve_data_files(
                module_factory_result.builder_kwargs["base_path"], DownloadConfig()
            )
            assert isinstance(module_builder_config_v1.data_files, DataFilesDict)
            assert isinstance(module_builder_config_v2.data_files, DataFilesDict)
            assert sorted(module_builder_config_v1.data_files) == ["test", "train"]
            assert len(module_builder_config_v1.data_files["train"]) == 3
            assert len(module_builder_config_v1.data_files["test"]) == 3
            assert sorted(module_builder_config_v2.data_files) == ["test", "train"]
            assert len(module_builder_config_v2.data_files["train"]) == 2
            assert len(module_builder_config_v2.data_files["test"]) == 1
            assert module_builder_config_v1.drop_labels is True  # parameter is passed from metadata
            assert module_builder_config_v2.drop_labels is False  # parameter is passed from metadata

            # we don't pass config params to builder in builder_kwargs, they are stored in builder_configs directly
            assert "drop_labels" not in module_factory_result.builder_kwargs

            if dataset_name == SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT:
                assert module_factory_result.builder_configs_parameters.default_config_name == "v1"
            else:
                assert module_factory_result.builder_configs_parameters.default_config_name is None

    @pytest.mark.integration
    def test_HubDatasetModuleFactoryWithScript(self):
        factory = HubDatasetModuleFactoryWithScript(
            SAMPLE_DATASET_IDENTIFIER,
            download_config=self.download_config,
            dynamic_modules_path=self.dynamic_modules_path,
            trust_remote_code=True,
        )
        module_factory_result = factory.get_module()
        assert importlib.import_module(module_factory_result.module_path) is not None
        assert module_factory_result.builder_kwargs["base_path"].startswith(config.HF_ENDPOINT)

    @pytest.mark.integration
    def test_HubDatasetModuleFactoryWithParquetExport(self):
        factory = HubDatasetModuleFactoryWithParquetExport(
            SAMPLE_DATASET_IDENTIFIER,
            download_config=self.download_config,
        )
        module_factory_result = factory.get_module()
        assert module_factory_result.module_path == "datasets.packaged_modules.parquet.parquet"
        assert module_factory_result.builder_configs_parameters.builder_configs
        assert isinstance(module_factory_result.builder_configs_parameters.builder_configs[0], ParquetConfig)
        module_factory_result.builder_configs_parameters.builder_configs[0]._resolve_data_files(
            base_path="", download_config=self.download_config
        )
        assert module_factory_result.builder_configs_parameters.builder_configs[0].data_files == {
            "train": [
                "hf://datasets/hf-internal-testing/dataset_with_script@3d8a0e457ddb581301fb13f60d38906c9cdcaf1c/default/train/0000.parquet"
            ],
            "validation": [
                "hf://datasets/hf-internal-testing/dataset_with_script@3d8a0e457ddb581301fb13f60d38906c9cdcaf1c/default/validation/0000.parquet"
            ],
        }

    @pytest.mark.integration
    def test_HubDatasetModuleFactoryWithParquetExport_errors_on_wrong_sha(self):
        factory = HubDatasetModuleFactoryWithParquetExport(
            SAMPLE_DATASET_IDENTIFIER,
            download_config=self.download_config,
            revision="0e1cee81e718feadf49560b287c4eb669c2efb1a",
        )
        factory.get_module()
        factory = HubDatasetModuleFactoryWithParquetExport(
            SAMPLE_DATASET_IDENTIFIER,
            download_config=self.download_config,
            revision="wrong_sha",
        )
        with self.assertRaises(_dataset_viewer.DatasetViewerError):
            factory.get_module()

    @pytest.mark.integration
    def test_CachedDatasetModuleFactory(self):
        name = SAMPLE_DATASET_IDENTIFIER2
        load_dataset_builder(name, cache_dir=self.cache_dir).download_and_prepare()
        for offline_mode in OfflineSimulationMode:
            with offline(offline_mode):
                factory = CachedDatasetModuleFactory(
                    name,
                    cache_dir=self.cache_dir,
                )
                module_factory_result = factory.get_module()
                assert importlib.import_module(module_factory_result.module_path) is not None

    def test_CachedDatasetModuleFactory_with_script(self):
        path = os.path.join(self._dataset_loading_script_dir, f"{DATASET_LOADING_SCRIPT_NAME}.py")
        factory = LocalDatasetModuleFactoryWithScript(
            path,
            download_config=self.download_config,
            dynamic_modules_path=self.dynamic_modules_path,
            trust_remote_code=True,
        )
        module_factory_result = factory.get_module()
        for offline_mode in OfflineSimulationMode:
            with offline(offline_mode):
                factory = CachedDatasetModuleFactory(
                    DATASET_LOADING_SCRIPT_NAME,
                    dynamic_modules_path=self.dynamic_modules_path,
                )
                module_factory_result = factory.get_module()
                assert importlib.import_module(module_factory_result.module_path) is not None


@pytest.mark.parametrize(
    "factory_class",
    [
        CachedDatasetModuleFactory,
        HubDatasetModuleFactoryWithoutScript,
        HubDatasetModuleFactoryWithScript,
        LocalDatasetModuleFactoryWithoutScript,
        LocalDatasetModuleFactoryWithScript,
        PackagedDatasetModuleFactory,
    ],
)
def test_module_factories(factory_class):
    name = "dummy_name"
    factory = factory_class(name)
    assert factory.name == name


@pytest.mark.integration
class LoadTest(TestCase):
    @pytest.fixture(autouse=True)
    def inject_fixtures(self, caplog):
        self._caplog = caplog

    def setUp(self):
        self.hf_modules_cache = tempfile.mkdtemp()
        self.cache_dir = tempfile.mkdtemp()
        self.dynamic_modules_path = datasets.load.init_dynamic_modules(
            name="test_datasets_modules2", hf_modules_cache=self.hf_modules_cache
        )

    def tearDown(self):
        shutil.rmtree(self.hf_modules_cache)
        shutil.rmtree(self.cache_dir)

    def _dummy_module_dir(self, modules_dir, dummy_module_name, dummy_code):
        assert dummy_module_name.startswith("__")
        module_dir = os.path.join(modules_dir, dummy_module_name)
        os.makedirs(module_dir, exist_ok=True)
        module_path = os.path.join(module_dir, dummy_module_name + ".py")
        with open(module_path, "w") as f:
            f.write(dummy_code)
        return module_dir

    def test_dataset_module_factory(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # prepare module from directory path
            dummy_code = "MY_DUMMY_VARIABLE = 'hello there'"
            module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code)
            dataset_module = datasets.load.dataset_module_factory(
                module_dir, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=True
            )
            dummy_module = importlib.import_module(dataset_module.module_path)
            self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "hello there")
            self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest())
            # prepare module from file path + check resolved_file_path
            dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'"
            module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name1__", dummy_code)
            module_path = os.path.join(module_dir, "__dummy_module_name1__.py")
            dataset_module = datasets.load.dataset_module_factory(
                module_path, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=True
            )
            dummy_module = importlib.import_module(dataset_module.module_path)
            self.assertEqual(dummy_module.MY_DUMMY_VARIABLE, "general kenobi")
            self.assertEqual(dataset_module.hash, sha256(dummy_code.encode("utf-8")).hexdigest())
            # missing module
            for offline_simulation_mode in list(OfflineSimulationMode):
                with offline(offline_simulation_mode):
                    with self.assertRaises(
                        (DatasetNotFoundError, ConnectionError, requests.exceptions.ConnectionError)
                    ):
                        datasets.load.dataset_module_factory(
                            "__missing_dummy_module_name__", dynamic_modules_path=self.dynamic_modules_path
                        )

    @pytest.mark.integration
    def test_offline_dataset_module_factory(self):
        repo_id = SAMPLE_DATASET_IDENTIFIER2
        builder = load_dataset_builder(repo_id, cache_dir=self.cache_dir)
        builder.download_and_prepare()
        for offline_simulation_mode in list(OfflineSimulationMode):
            with offline(offline_simulation_mode):
                self._caplog.clear()
                # allow providing the repo id without an explicit path to a remote or local file
                dataset_module = datasets.load.dataset_module_factory(repo_id, cache_dir=self.cache_dir)
                self.assertEqual(dataset_module.module_path, "datasets.packaged_modules.cache.cache")
                self.assertIn("Using the latest cached version of the dataset", self._caplog.text)

    def test_offline_dataset_module_factory_with_script(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            dummy_code = "MY_DUMMY_VARIABLE = 'hello there'"
            module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code)
            dataset_module_1 = datasets.load.dataset_module_factory(
                module_dir, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=True
            )
            time.sleep(0.1)  # make sure there's a difference in the OS update time of the python file
            dummy_code = "MY_DUMMY_VARIABLE = 'general kenobi'"
            module_dir = self._dummy_module_dir(tmp_dir, "__dummy_module_name2__", dummy_code)
            dataset_module_2 = datasets.load.dataset_module_factory(
                module_dir, dynamic_modules_path=self.dynamic_modules_path, trust_remote_code=True
            )
            for offline_simulation_mode in list(OfflineSimulationMode):
                with offline(offline_simulation_mode):
                    self._caplog.clear()
                    # allow providing the module name without an explicit path to a remote or local file
                    dataset_module_3 = datasets.load.dataset_module_factory(
                        "__dummy_module_name2__", dynamic_modules_path=self.dynamic_modules_path
                    )
                    # it loads the most recent version of the module
                    self.assertEqual(dataset_module_2.module_path, dataset_module_3.module_path)
                    self.assertNotEqual(dataset_module_1.module_path, dataset_module_3.module_path)
                    self.assertIn("Using the latest cached version of the module", self._caplog.text)

    @pytest.mark.integration
    def test_offline_dataset_module_factory_with_capital_letters_in_name(self):
        repo_id = SAMPLE_DATASET_CAPITAL_LETTERS_IN_NAME
        builder = load_dataset_builder(repo_id, cache_dir=self.cache_dir)
        builder.download_and_prepare()
        for offline_simulation_mode in list(OfflineSimulationMode):
            with offline(offline_simulation_mode):
                self._caplog.clear()
                # allow providing the repo id without an explicit path to a remote or local file
                dataset_module = datasets.load.dataset_module_factory(repo_id, cache_dir=self.cache_dir)
                self.assertEqual(dataset_module.module_path, "datasets.packaged_modules.cache.cache")
                self.assertIn("Using the latest cached version of the dataset", self._caplog.text)

    def test_load_dataset_from_hub(self):
        with self.assertRaises(DatasetNotFoundError) as context:
            datasets.load_dataset("_dummy")
        self.assertIn(
            "Dataset '_dummy' doesn't exist on the Hub",
            str(context.exception),
        )
        with self.assertRaises(DatasetNotFoundError) as context:
            datasets.load_dataset("HuggingFaceFW/fineweb-edu", revision="0.0.0")
        self.assertIn(
            "Revision '0.0.0' doesn't exist for dataset 'HuggingFaceFW/fineweb-edu' on the Hub.",
            str(context.exception),
        )
        for offline_simulation_mode in list(OfflineSimulationMode):
            with offline(offline_simulation_mode):
                with self.assertRaises(ConnectionError) as context:
                    datasets.load_dataset("_dummy")
                if offline_simulation_mode != OfflineSimulationMode.HF_HUB_OFFLINE_SET_TO_1:
                    self.assertIn(
                        "Couldn't reach '_dummy' on the Hub",
                        str(context.exception),
                    )

    def test_load_dataset_namespace(self):
        with self.assertRaises(DatasetNotFoundError) as context:
            datasets.load_dataset("hf-internal-testing/_dummy")
        self.assertIn(
            "hf-internal-testing/_dummy",
            str(context.exception),
        )
        for offline_simulation_mode in list(OfflineSimulationMode):
            with offline(offline_simulation_mode):
                with self.assertRaises(ConnectionError) as context:
                    datasets.load_dataset("hf-internal-testing/_dummy")
                self.assertIn("hf-internal-testing/_dummy", str(context.exception), msg=offline_simulation_mode)


@pytest.mark.integration
def test_load_dataset_builder_with_metadata():
    builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4)
    assert isinstance(builder, ImageFolder)
    assert builder.config.name == "default"
    assert builder.config.data_files is not None
    assert builder.config.drop_metadata is None
    with pytest.raises(ValueError):
        builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, "non-existing-config")


@pytest.mark.integration
def test_load_dataset_builder_config_kwargs_passed_as_arguments():
    builder_default = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4)
    builder_custom = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True)
    assert builder_custom.config.drop_metadata != builder_default.config.drop_metadata
    assert builder_custom.config.drop_metadata is True


@pytest.mark.integration
def test_load_dataset_builder_with_two_configs_in_metadata():
    builder = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1")
    assert isinstance(builder, AudioFolder)
    assert builder.config.name == "v1"
    assert builder.config.data_files is not None
    with pytest.raises(ValueError):
        datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA)
    with pytest.raises(ValueError):
        datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "non-existing-config")


@pytest.mark.parametrize("serializer", [pickle, dill])
def test_load_dataset_builder_with_metadata_configs_pickable(serializer):
    builder = datasets.load_dataset_builder(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA)
    builder_unpickled = serializer.loads(serializer.dumps(builder))
    assert builder.BUILDER_CONFIGS == builder_unpickled.BUILDER_CONFIGS
    assert list(builder_unpickled.builder_configs) == ["custom"]
    assert isinstance(builder_unpickled.builder_configs["custom"], AudioFolderConfig)

    builder2 = datasets.load_dataset_builder(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1")
    builder2_unpickled = serializer.loads(serializer.dumps(builder2))
    assert builder2.BUILDER_CONFIGS == builder2_unpickled.BUILDER_CONFIGS != builder_unpickled.BUILDER_CONFIGS
    assert list(builder2_unpickled.builder_configs) == ["v1", "v2"]
    assert isinstance(builder2_unpickled.builder_configs["v1"], AudioFolderConfig)
    assert isinstance(builder2_unpickled.builder_configs["v2"], AudioFolderConfig)


def test_load_dataset_builder_for_absolute_script_dir(dataset_loading_script_dir, data_dir):
    builder = datasets.load_dataset_builder(dataset_loading_script_dir, data_dir=data_dir, trust_remote_code=True)
    assert isinstance(builder, DatasetBuilder)
    assert builder.name == DATASET_LOADING_SCRIPT_NAME
    assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME
    assert builder.info.features == Features({"text": Value("string")})


def test_load_dataset_builder_for_relative_script_dir(dataset_loading_script_dir, data_dir):
    with set_current_working_directory_to_temp_dir():
        relative_script_dir = DATASET_LOADING_SCRIPT_NAME
        shutil.copytree(dataset_loading_script_dir, relative_script_dir)
        builder = datasets.load_dataset_builder(relative_script_dir, data_dir=data_dir, trust_remote_code=True)
        assert isinstance(builder, DatasetBuilder)
        assert builder.name == DATASET_LOADING_SCRIPT_NAME
        assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME
        assert builder.info.features == Features({"text": Value("string")})


def test_load_dataset_builder_for_script_path(dataset_loading_script_dir, data_dir):
    builder = datasets.load_dataset_builder(
        os.path.join(dataset_loading_script_dir, DATASET_LOADING_SCRIPT_NAME + ".py"),
        data_dir=data_dir,
        trust_remote_code=True,
    )
    assert isinstance(builder, DatasetBuilder)
    assert builder.name == DATASET_LOADING_SCRIPT_NAME
    assert builder.dataset_name == DATASET_LOADING_SCRIPT_NAME
    assert builder.info.features == Features({"text": Value("string")})


def test_load_dataset_builder_for_absolute_data_dir(complex_data_dir):
    builder = datasets.load_dataset_builder(complex_data_dir)
    assert isinstance(builder, DatasetBuilder)
    assert builder.name == "text"
    assert builder.dataset_name == Path(complex_data_dir).name
    assert builder.config.name == "default"
    assert isinstance(builder.config.data_files, DataFilesDict)
    assert len(builder.config.data_files["train"]) > 0
    assert len(builder.config.data_files["test"]) > 0


def test_load_dataset_builder_for_relative_data_dir(complex_data_dir):
    with set_current_working_directory_to_temp_dir():
        relative_data_dir = "relative_data_dir"
        shutil.copytree(complex_data_dir, relative_data_dir)
        builder = datasets.load_dataset_builder(relative_data_dir)
        assert isinstance(builder, DatasetBuilder)
        assert builder.name == "text"
        assert builder.dataset_name == relative_data_dir
        assert builder.config.name == "default"
        assert isinstance(builder.config.data_files, DataFilesDict)
        assert len(builder.config.data_files["train"]) > 0
        assert len(builder.config.data_files["test"]) > 0


@pytest.mark.integration
def test_load_dataset_builder_for_community_dataset_with_script():
    builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER)
    assert isinstance(builder, DatasetBuilder)
    assert builder.name == "parquet"
    assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1]
    assert builder.config.name == "default"
    assert builder.info.features == Features({"text": Value("string")})
    namespace = SAMPLE_DATASET_IDENTIFIER[: SAMPLE_DATASET_IDENTIFIER.index("/")]
    assert builder._relative_data_dir().startswith(namespace)
    assert builder.__module__.startswith("datasets.")


@pytest.mark.integration
def test_load_dataset_builder_for_community_dataset_with_script_no_parquet_export():
    with patch.object(config, "USE_PARQUET_EXPORT", False):
        builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER, trust_remote_code=True)
    assert isinstance(builder, DatasetBuilder)
    assert builder.name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1]
    assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER.split("/")[-1]
    assert builder.config.name == "default"
    assert builder.info.features == Features({"text": Value("string")})
    namespace = SAMPLE_DATASET_IDENTIFIER[: SAMPLE_DATASET_IDENTIFIER.index("/")]
    assert builder._relative_data_dir().startswith(namespace)
    assert SAMPLE_DATASET_IDENTIFIER.replace("/", "--") in builder.__module__


@pytest.mark.integration
def test_load_dataset_builder_use_parquet_export_if_dont_trust_remote_code_keeps_features():
    dataset_name = "food101"
    builder = datasets.load_dataset_builder(dataset_name, trust_remote_code=False)
    assert isinstance(builder, DatasetBuilder)
    assert builder.name == "parquet"
    assert builder.dataset_name == dataset_name
    assert builder.config.name == "default"
    assert list(builder.info.features) == ["image", "label"]
    assert builder.info.features["image"] == Image()


@pytest.mark.integration
def test_load_dataset_builder_for_community_dataset_without_script():
    builder = datasets.load_dataset_builder(SAMPLE_DATASET_IDENTIFIER2)
    assert isinstance(builder, DatasetBuilder)
    assert builder.name == "text"
    assert builder.dataset_name == SAMPLE_DATASET_IDENTIFIER2.split("/")[-1]
    assert builder.config.name == "default"
    assert isinstance(builder.config.data_files, DataFilesDict)
    assert len(builder.config.data_files["train"]) > 0
    assert len(builder.config.data_files["test"]) > 0


def test_load_dataset_builder_fail():
    with pytest.raises(DatasetNotFoundError):
        datasets.load_dataset_builder("blabla")


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_load_dataset_local_script(dataset_loading_script_dir, data_dir, keep_in_memory, caplog):
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = load_dataset(
            dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=keep_in_memory, trust_remote_code=True
        )
    assert isinstance(dataset, DatasetDict)
    assert all(isinstance(d, Dataset) for d in dataset.values())
    assert len(dataset) == 2
    assert isinstance(next(iter(dataset["train"])), dict)


def test_load_dataset_cached_local_script(dataset_loading_script_dir, data_dir, caplog):
    dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, trust_remote_code=True)
    assert isinstance(dataset, DatasetDict)
    assert all(isinstance(d, Dataset) for d in dataset.values())
    assert len(dataset) == 2
    assert isinstance(next(iter(dataset["train"])), dict)
    for offline_simulation_mode in list(OfflineSimulationMode):
        with offline(offline_simulation_mode):
            caplog.clear()
            # Load dataset from cache
            dataset = datasets.load_dataset(DATASET_LOADING_SCRIPT_NAME, data_dir=data_dir)
            assert len(dataset) == 2
            assert "Using the latest cached version of the module" in caplog.text
            assert isinstance(next(iter(dataset["train"])), dict)
    with pytest.raises(DatasetNotFoundError) as exc_info:
        datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST)
    assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value)


@pytest.mark.integration
@pytest.mark.parametrize(
    "kwargs, expected_train_num_rows, expected_test_num_rows",
    [
        ({}, 2, 2),
        ({"data_dir": "data1"}, 1, 1),  # GH-6918: NonMatchingSplitsSizesError
        ({"data_files": "data1/train.txt"}, 1, None),  # GH-6939: ExpectedMoreSplits
    ],
)
def test_load_dataset_without_script_from_hub(kwargs, expected_train_num_rows, expected_test_num_rows):
    dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER3, **kwargs)
    assert dataset["train"].num_rows == expected_train_num_rows
    assert (dataset["test"].num_rows == expected_test_num_rows) if expected_test_num_rows else ("test" not in dataset)


@pytest.mark.integration
@pytest.mark.parametrize("stream_from_cache, ", [False, True])
def test_load_dataset_cached_from_hub(stream_from_cache, caplog):
    dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER3)
    assert isinstance(dataset, DatasetDict)
    assert all(isinstance(d, Dataset) for d in dataset.values())
    assert len(dataset) == 2
    assert isinstance(next(iter(dataset["train"])), dict)
    for offline_simulation_mode in list(OfflineSimulationMode):
        with offline(offline_simulation_mode):
            caplog.clear()
            # Load dataset from cache
            dataset = datasets.load_dataset(SAMPLE_DATASET_IDENTIFIER3, streaming=stream_from_cache)
            assert len(dataset) == 2
            assert "Using the latest cached version of the dataset" in caplog.text
            assert isinstance(next(iter(dataset["train"])), dict)
    with pytest.raises(DatasetNotFoundError) as exc_info:
        datasets.load_dataset(SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST)
    assert f"Dataset '{SAMPLE_DATASET_NAME_THAT_DOESNT_EXIST}' doesn't exist on the Hub" in str(exc_info.value)


def test_load_dataset_streaming(dataset_loading_script_dir, data_dir):
    dataset = load_dataset(dataset_loading_script_dir, streaming=True, data_dir=data_dir, trust_remote_code=True)
    assert isinstance(dataset, IterableDatasetDict)
    assert all(isinstance(d, IterableDataset) for d in dataset.values())
    assert len(dataset) == 2
    assert isinstance(next(iter(dataset["train"])), dict)


def test_load_dataset_streaming_gz_json(jsonl_gz_path):
    data_files = jsonl_gz_path
    ds = load_dataset("json", split="train", data_files=data_files, streaming=True)
    assert isinstance(ds, IterableDataset)
    ds_item = next(iter(ds))
    assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}


@pytest.mark.integration
@pytest.mark.parametrize(
    "path", ["sample.jsonl", "sample.jsonl.gz", "sample.tar", "sample.jsonl.xz", "sample.zip", "sample.jsonl.zst"]
)
def test_load_dataset_streaming_compressed_files(path):
    repo_id = "hf-internal-testing/compressed_files"
    data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{path}"
    if data_files[-3:] in ("zip", "tar"):  # we need to glob "*" inside archives
        data_files = data_files[-3:] + "://*::" + data_files
        return  # TODO(QL, albert): re-add support for streaming ZIP and TAR archives
    ds = load_dataset("json", split="train", data_files=data_files, streaming=True)
    assert isinstance(ds, IterableDataset)
    ds_item = next(iter(ds))
    assert ds_item == {
        "tokens": ["Ministeri", "de", "Justícia", "d'Espanya"],
        "ner_tags": [1, 2, 2, 2],
        "langs": ["ca", "ca", "ca", "ca"],
        "spans": ["PER: Ministeri de Justícia d'Espanya"],
    }


@pytest.mark.parametrize("path_extension", ["csv", "csv.bz2"])
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_streaming_csv(path_extension, streaming, csv_path, bz2_csv_path):
    paths = {"csv": csv_path, "csv.bz2": bz2_csv_path}
    data_files = str(paths[path_extension])
    features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")})
    ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming)
    assert isinstance(ds, IterableDataset if streaming else Dataset)
    ds_item = next(iter(ds))
    assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}


@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("data_file", ["zip_csv_path", "zip_csv_with_dir_path", "csv_path"])
def test_load_dataset_zip_csv(data_file, streaming, zip_csv_path, zip_csv_with_dir_path, csv_path):
    data_file_paths = {
        "zip_csv_path": zip_csv_path,
        "zip_csv_with_dir_path": zip_csv_with_dir_path,
        "csv_path": csv_path,
    }
    data_files = str(data_file_paths[data_file])
    expected_size = 8 if data_file.startswith("zip") else 4
    features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")})
    ds = load_dataset("csv", split="train", data_files=data_files, features=features, streaming=streaming)
    if streaming:
        ds_item_counter = 0
        for ds_item in ds:
            if ds_item_counter == 0:
                assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}
            ds_item_counter += 1
        assert ds_item_counter == expected_size
    else:
        assert ds.shape[0] == expected_size
        ds_item = next(iter(ds))
        assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}


@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("data_file", ["zip_jsonl_path", "zip_jsonl_with_dir_path", "jsonl_path"])
def test_load_dataset_zip_jsonl(data_file, streaming, zip_jsonl_path, zip_jsonl_with_dir_path, jsonl_path):
    data_file_paths = {
        "zip_jsonl_path": zip_jsonl_path,
        "zip_jsonl_with_dir_path": zip_jsonl_with_dir_path,
        "jsonl_path": jsonl_path,
    }
    data_files = str(data_file_paths[data_file])
    expected_size = 8 if data_file.startswith("zip") else 4
    features = Features({"col_1": Value("string"), "col_2": Value("int32"), "col_3": Value("float32")})
    ds = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming)
    if streaming:
        ds_item_counter = 0
        for ds_item in ds:
            if ds_item_counter == 0:
                assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}
            ds_item_counter += 1
        assert ds_item_counter == expected_size
    else:
        assert ds.shape[0] == expected_size
        ds_item = next(iter(ds))
        assert ds_item == {"col_1": "0", "col_2": 0, "col_3": 0.0}


@pytest.mark.parametrize("streaming", [False, True])
@pytest.mark.parametrize("data_file", ["zip_text_path", "zip_text_with_dir_path", "text_path"])
def test_load_dataset_zip_text(data_file, streaming, zip_text_path, zip_text_with_dir_path, text_path):
    data_file_paths = {
        "zip_text_path": zip_text_path,
        "zip_text_with_dir_path": zip_text_with_dir_path,
        "text_path": text_path,
    }
    data_files = str(data_file_paths[data_file])
    expected_size = 8 if data_file.startswith("zip") else 4
    ds = load_dataset("text", split="train", data_files=data_files, streaming=streaming)
    if streaming:
        ds_item_counter = 0
        for ds_item in ds:
            if ds_item_counter == 0:
                assert ds_item == {"text": "0"}
            ds_item_counter += 1
        assert ds_item_counter == expected_size
    else:
        assert ds.shape[0] == expected_size
        ds_item = next(iter(ds))
        assert ds_item == {"text": "0"}


@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_arrow(streaming, data_dir_with_arrow):
    ds = load_dataset("arrow", split="train", data_dir=data_dir_with_arrow, streaming=streaming)
    expected_size = 10
    if streaming:
        ds_item_counter = 0
        for ds_item in ds:
            if ds_item_counter == 0:
                assert ds_item == {"col_1": "foo"}
            ds_item_counter += 1
        assert ds_item_counter == 10
    else:
        assert ds.num_rows == 10
        assert ds.shape[0] == expected_size
        ds_item = next(iter(ds))
        assert ds_item == {"col_1": "foo"}


def test_load_dataset_text_with_unicode_new_lines(text_path_with_unicode_new_lines):
    data_files = str(text_path_with_unicode_new_lines)
    ds = load_dataset("text", split="train", data_files=data_files)
    assert ds.num_rows == 3


def test_load_dataset_with_unsupported_extensions(text_dir_with_unsupported_extension):
    data_files = str(text_dir_with_unsupported_extension)
    ds = load_dataset("text", split="train", data_files=data_files)
    assert ds.num_rows == 4


@pytest.mark.integration
def test_loading_from_the_datasets_hub():
    with tempfile.TemporaryDirectory() as tmp_dir:
        with load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=tmp_dir) as dataset:
            assert len(dataset["train"]) == 2
            assert len(dataset["validation"]) == 3


@pytest.mark.integration
def test_loading_from_the_datasets_hub_with_token():
    true_request = requests.Session().request

    def assert_auth(method, url, *args, headers, **kwargs):
        assert headers["authorization"] == "Bearer foo"
        return true_request(method, url, *args, headers=headers, **kwargs)

    with patch("requests.Session.request") as mock_request:
        mock_request.side_effect = assert_auth
        with tempfile.TemporaryDirectory() as tmp_dir:
            with offline():
                with pytest.raises((ConnectionError, requests.exceptions.ConnectionError)):
                    load_dataset(SAMPLE_NOT_EXISTING_DATASET_IDENTIFIER, cache_dir=tmp_dir, token="foo")
        mock_request.assert_called()


@pytest.mark.integration
def test_load_streaming_private_dataset(hf_token, hf_private_dataset_repo_txt_data):
    ds = load_dataset(hf_private_dataset_repo_txt_data, streaming=True, token=hf_token)
    assert next(iter(ds)) is not None


@pytest.mark.integration
def test_load_dataset_builder_private_dataset(hf_token, hf_private_dataset_repo_txt_data):
    builder = load_dataset_builder(hf_private_dataset_repo_txt_data, token=hf_token)
    assert isinstance(builder, DatasetBuilder)


@pytest.mark.integration
def test_load_streaming_private_dataset_with_zipped_data(hf_token, hf_private_dataset_repo_zipped_txt_data):
    ds = load_dataset(hf_private_dataset_repo_zipped_txt_data, streaming=True, token=hf_token)
    assert next(iter(ds)) is not None


@pytest.mark.integration
def test_load_dataset_config_kwargs_passed_as_arguments():
    ds_default = load_dataset(SAMPLE_DATASET_IDENTIFIER4)
    ds_custom = load_dataset(SAMPLE_DATASET_IDENTIFIER4, drop_metadata=True)
    assert list(ds_default["train"].features) == ["image", "caption"]
    assert list(ds_custom["train"].features) == ["image"]


@require_sndfile
@pytest.mark.integration
def test_load_hub_dataset_without_script_with_single_config_in_metadata():
    # load the same dataset but with no configurations (=with default parameters)
    ds = load_dataset(SAMPLE_DATASET_NO_CONFIGS_IN_METADATA)
    assert list(ds["train"].features) == ["audio", "label"]  # assert label feature is here as expected by default
    assert len(ds["train"]) == 5 and len(ds["test"]) == 4

    ds2 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA)  # single config -> no need to specify it
    assert list(ds2["train"].features) == ["audio"]  # assert param `drop_labels=True` from metadata is passed
    assert len(ds2["train"]) == 3 and len(ds2["test"]) == 3

    ds3 = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "custom")
    assert list(ds3["train"].features) == ["audio"]  # assert param `drop_labels=True` from metadata is passed
    assert len(ds3["train"]) == 3 and len(ds3["test"]) == 3

    with pytest.raises(ValueError):
        # no config named "default"
        _ = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, "default")


@require_sndfile
@pytest.mark.integration
def test_load_hub_dataset_without_script_with_two_config_in_metadata():
    ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1")
    assert list(ds["train"].features) == ["audio"]  # assert param `drop_labels=True` from metadata is passed
    assert len(ds["train"]) == 3 and len(ds["test"]) == 3

    ds2 = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2")
    assert list(ds2["train"].features) == [
        "audio",
        "label",
    ]  # assert param `drop_labels=False` from metadata is passed
    assert len(ds2["train"]) == 2 and len(ds2["test"]) == 1

    with pytest.raises(ValueError):
        # config is required but not specified
        _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA)

    with pytest.raises(ValueError):
        # no config named "default"
        _ = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "default")

    ds_with_default = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA_WITH_DEFAULT)
    # it's a dataset with the same data but "v1" config is marked as a default one
    assert list(ds_with_default["train"].features) == list(ds["train"].features)
    assert len(ds_with_default["train"]) == len(ds["train"]) and len(ds_with_default["test"]) == len(ds["test"])


@require_sndfile
@pytest.mark.integration
def test_load_hub_dataset_without_script_with_metadata_config_in_parallel():
    # assert it doesn't fail (pickling of dynamically created class works)
    ds = load_dataset(SAMPLE_DATASET_SINGLE_CONFIG_IN_METADATA, num_proc=2)
    assert "label" not in ds["train"].features  # assert param `drop_labels=True` from metadata is passed
    assert len(ds["train"]) == 3 and len(ds["test"]) == 3

    ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v1", num_proc=2)
    assert "label" not in ds["train"].features  # assert param `drop_labels=True` from metadata is passed
    assert len(ds["train"]) == 3 and len(ds["test"]) == 3

    ds = load_dataset(SAMPLE_DATASET_TWO_CONFIG_IN_METADATA, "v2", num_proc=2)
    assert "label" in ds["train"].features
    assert len(ds["train"]) == 2 and len(ds["test"]) == 1


@require_pil
@pytest.mark.integration
@pytest.mark.parametrize("streaming", [True])
def test_load_dataset_private_zipped_images(hf_private_dataset_repo_zipped_img_data, hf_token, streaming):
    ds = load_dataset(hf_private_dataset_repo_zipped_img_data, split="train", streaming=streaming, token=hf_token)
    assert isinstance(ds, IterableDataset if streaming else Dataset)
    ds_items = list(ds)
    assert len(ds_items) == 2


def test_load_dataset_then_move_then_reload(dataset_loading_script_dir, data_dir, tmp_path, caplog):
    cache_dir1 = tmp_path / "cache1"
    cache_dir2 = tmp_path / "cache2"
    dataset = load_dataset(
        dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1, trust_remote_code=True
    )
    fingerprint1 = dataset._fingerprint
    del dataset
    os.rename(cache_dir1, cache_dir2)
    caplog.clear()
    with caplog.at_level(INFO, logger=get_logger().name):
        dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir2)
    assert "Found cached dataset" in caplog.text
    assert dataset._fingerprint == fingerprint1, "for the caching mechanism to work, fingerprint should stay the same"
    dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, split="test", cache_dir=cache_dir2)
    assert dataset._fingerprint != fingerprint1


def test_load_dataset_builder_then_edit_then_load_again(tmp_path: Path):
    dataset_dir = tmp_path / "test_load_dataset_then_edit_then_load_again"
    dataset_dir.mkdir()
    with open(dataset_dir / "train.txt", "w") as f:
        f.write("Hello there")
    dataset_builder = load_dataset_builder(str(dataset_dir))
    with open(dataset_dir / "train.txt", "w") as f:
        f.write("General Kenobi !")
    edited_dataset_builder = load_dataset_builder(str(dataset_dir))
    assert dataset_builder.cache_dir != edited_dataset_builder.cache_dir


def test_load_dataset_readonly(dataset_loading_script_dir, dataset_loading_script_dir_readonly, data_dir, tmp_path):
    cache_dir1 = tmp_path / "cache1"
    cache_dir2 = tmp_path / "cache2"
    dataset = load_dataset(
        dataset_loading_script_dir, data_dir=data_dir, split="train", cache_dir=cache_dir1, trust_remote_code=True
    )
    fingerprint1 = dataset._fingerprint
    del dataset
    # Load readonly dataset and check that the fingerprint is the same.
    dataset = load_dataset(dataset_loading_script_dir_readonly, data_dir=data_dir, split="train", cache_dir=cache_dir2)
    assert dataset._fingerprint == fingerprint1, "Cannot load a dataset in a readonly folder."


@pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 50, 500])
def test_load_dataset_local_with_default_in_memory(
    max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, monkeypatch
):
    current_dataset_size = 148
    if max_in_memory_dataset_size == "default":
        max_in_memory_dataset_size = 0  # default
    else:
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size)
    if max_in_memory_dataset_size:
        expected_in_memory = current_dataset_size < max_in_memory_dataset_size
    else:
        expected_in_memory = False
    with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, trust_remote_code=True)
    assert (dataset["train"].dataset_size < max_in_memory_dataset_size) is expected_in_memory


@pytest.mark.parametrize("max_in_memory_dataset_size", ["default", 0, 100, 1000])
def test_load_from_disk_with_default_in_memory(
    max_in_memory_dataset_size, dataset_loading_script_dir, data_dir, tmp_path, monkeypatch
):
    current_dataset_size = 512  # arrow file size = 512, in-memory dataset size = 148
    if max_in_memory_dataset_size == "default":
        max_in_memory_dataset_size = 0  # default
    else:
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", max_in_memory_dataset_size)
    if max_in_memory_dataset_size:
        expected_in_memory = current_dataset_size < max_in_memory_dataset_size
    else:
        expected_in_memory = False

    dset = load_dataset(dataset_loading_script_dir, data_dir=data_dir, keep_in_memory=True, trust_remote_code=True)
    dataset_path = tmp_path / "saved_dataset"
    dset.save_to_disk(dataset_path)

    with assert_arrow_memory_increases() if expected_in_memory else assert_arrow_memory_doesnt_increase():
        _ = load_from_disk(dataset_path)


@pytest.fixture
def moto_server(monkeypatch):
    from moto.server import ThreadedMotoServer

    monkeypatch.setattr(
        "os.environ",
        {
            "AWS_ENDPOINT_URL": "http://localhost:5000",
            "AWS_DEFAULT_REGION": "us-east-1",
            "AWS_ACCESS_KEY_ID": "FOO",
            "AWS_SECRET_ACCESS_KEY": "BAR",
        },
    )
    server = ThreadedMotoServer()
    server.start()
    try:
        yield
    finally:
        server.stop()


@require_moto
def test_load_file_from_s3(moto_server):
    # we need server mode here because of an aiobotocore incompatibility with moto.mock_aws
    # (https://github.com/getmoto/moto/issues/6836)
    import boto3

    # Create a mock S3 bucket
    bucket_name = "test-bucket"
    s3 = boto3.client("s3", region_name="us-east-1")
    s3.create_bucket(Bucket=bucket_name)

    # Upload a file to the mock bucket
    key = "test-file.csv"
    csv_data = "Island\nIsabela\nBaltra"
    s3.put_object(Bucket=bucket_name, Key=key, Body=csv_data)

    # Load the file from the mock bucket
    ds = datasets.load_dataset("csv", data_files={"train": "s3://test-bucket/test-file.csv"})

    # Check if the loaded content matches the original content
    assert list(ds["train"]) == [{"Island": "Isabela"}, {"Island": "Baltra"}]


@pytest.mark.integration
def test_remote_data_files():
    repo_id = "hf-internal-testing/raw_jsonl"
    filename = "wikiann-bn-validation.jsonl"
    data_files = f"https://huggingface.co/datasets/{repo_id}/resolve/main/{filename}"
    ds = load_dataset("json", split="train", data_files=data_files, streaming=True)
    assert isinstance(ds, IterableDataset)
    ds_item = next(iter(ds))
    assert ds_item.keys() == {"langs", "ner_tags", "spans", "tokens"}


def distributed_load_dataset(args):
    data_name, tmp_dir, datafiles = args
    dataset = load_dataset(data_name, cache_dir=tmp_dir, data_files=datafiles)
    return dataset


def test_load_dataset_distributed(tmp_path, csv_path):
    num_workers = 5
    args = "csv", str(tmp_path), csv_path
    with Pool(processes=num_workers) as pool:  # start num_workers processes
        datasets = pool.map(distributed_load_dataset, [args] * num_workers)
        assert len(datasets) == num_workers
        assert all(len(dataset) == len(datasets[0]) > 0 for dataset in datasets)
        assert len(datasets[0].cache_files) > 0
        assert all(dataset.cache_files == datasets[0].cache_files for dataset in datasets)


def distributed_load_dataset_with_script(args):
    data_name, tmp_dir, download_mode = args
    dataset = load_dataset(data_name, cache_dir=tmp_dir, download_mode=download_mode, trust_remote_code=True)
    return dataset


@require_not_windows  # windows doesn't support overwriting Arrow files from other processes
@pytest.mark.parametrize("download_mode", [None, "force_redownload"])
def test_load_dataset_distributed_with_script(tmp_path, download_mode):
    # we need to check in the "force_redownload" case
    # since in `_copy_script_and_other_resources_in_importable_dir()` we might delete the directory
    # containing the .py file while the other processes use it
    num_workers = 5
    args = (SAMPLE_DATASET_IDENTIFIER, str(tmp_path), download_mode)
    with Pool(processes=num_workers) as pool:  # start num_workers processes
        datasets = pool.map(distributed_load_dataset_with_script, [args] * num_workers)
        assert len(datasets) == num_workers
        assert all(len(dataset) == len(datasets[0]) > 0 for dataset in datasets)
        assert len(datasets[0].cache_files) > 0
        assert all(dataset.cache_files == datasets[0].cache_files for dataset in datasets)


def test_load_dataset_with_storage_options(mockfs):
    with mockfs.open("data.txt", "w") as f:
        f.write("Hello there\n")
        f.write("General Kenobi !")
    data_files = {"train": ["mock://data.txt"]}
    ds = load_dataset("text", data_files=data_files, storage_options=mockfs.storage_options)
    assert list(ds["train"]) == [{"text": "Hello there"}, {"text": "General Kenobi !"}]


@require_pil
def test_load_dataset_with_storage_options_with_decoding(mockfs, image_file):
    import PIL.Image

    filename = os.path.basename(image_file)
    with mockfs.open(filename, "wb") as fout:
        with open(image_file, "rb") as fin:
            fout.write(fin.read())
    data_files = {"train": ["mock://" + filename]}
    ds = load_dataset("imagefolder", data_files=data_files, storage_options=mockfs.storage_options)
    assert len(ds["train"]) == 1
    assert isinstance(ds["train"][0]["image"], PIL.Image.Image)


def test_load_dataset_without_script_with_zip(zip_csv_path):
    path = str(zip_csv_path.parent)
    ds = load_dataset(path)
    assert list(ds.keys()) == ["train"]
    assert ds["train"].column_names == ["col_1", "col_2", "col_3"]
    assert ds["train"].num_rows == 8
    assert ds["train"][0] == {"col_1": 0, "col_2": 0, "col_3": 0.0}


@pytest.mark.parametrize("trust_remote_code, expected", [(False, False), (True, True), (None, ValueError)])
def test_resolve_trust_remote_code_future(trust_remote_code, expected):
    if isinstance(expected, bool):
        assert resolve_trust_remote_code(trust_remote_code, repo_id="dummy") is expected
    else:
        with pytest.raises(expected):
            resolve_trust_remote_code(trust_remote_code, repo_id="dummy")


@pytest.mark.integration
def test_reload_old_cache_from_2_15(tmp_path: Path):
    cache_dir = tmp_path / "test_reload_old_cache_from_2_15"
    builder_cache_dir = (
        cache_dir / "polinaeterna___audiofolder_two_configs_in_metadata/v2-374bfde4f55442bc/0.0.0/7896925d64deea5d"
    )
    builder_cache_dir.mkdir(parents=True)
    arrow_path = builder_cache_dir / "audiofolder_two_configs_in_metadata-train.arrow"
    dataset_info_path = builder_cache_dir / "dataset_info.json"
    with dataset_info_path.open("w") as f:
        f.write("{}")
    arrow_path.touch()
    builder = load_dataset_builder(
        "polinaeterna/audiofolder_two_configs_in_metadata",
        "v2",
        data_files="v2/train/*",
        cache_dir=cache_dir.as_posix(),
    )
    assert builder.cache_dir == builder_cache_dir.as_posix()  # old cache from 2.15
    builder = load_dataset_builder(
        "polinaeterna/audiofolder_two_configs_in_metadata", "v2", cache_dir=cache_dir.as_posix()
    )
    assert (
        builder.cache_dir
        == (
            cache_dir / "polinaeterna___audiofolder_two_configs_in_metadata" / "v2" / "0.0.0" / str(builder.hash)
        ).as_posix()
    )  # new cache


@pytest.mark.integration
def test_update_dataset_card_data_with_standalone_yaml():
    # Labels defined in .huggingface.yml because they are too long to be in README.md
    from datasets.utils.metadata import MetadataConfigs

    with patch(
        "datasets.utils.metadata.MetadataConfigs.from_dataset_card_data",
        side_effect=MetadataConfigs.from_dataset_card_data,
    ) as card_data_read_mock:
        builder = load_dataset_builder("datasets-maintainers/dataset-with-standalone-yaml")
    assert card_data_read_mock.call_args.args[0]["license"] is not None  # from README.md
    assert card_data_read_mock.call_args.args[0]["dataset_info"] is not None  # from standalone yaml
    assert card_data_read_mock.call_args.args[0]["tags"] == ["test"]  # standalone yaml has precedence
    assert isinstance(
        builder.info.features["label"], datasets.ClassLabel
    )  # correctly loaded from long labels list in standalone yaml
datasets/tests/test_load.py/0
{ "file_path": "datasets/tests/test_load.py", "repo_id": "datasets", "token_count": 34804 }
89
- title: Unit 0. Welcome to the course
  sections:
  - local: unit0/introduction
    title: Welcome to the course 🤗
  - local: unit0/setup
    title: Setup
  - local: unit0/discord101
    title: Discord 101
- title: Unit 1. Introduction to Deep Reinforcement Learning
  sections:
  - local: unit1/introduction
    title: Introduction
  - local: unit1/what-is-rl
    title: What is Reinforcement Learning?
  - local: unit1/rl-framework
    title: The Reinforcement Learning Framework
  - local: unit1/tasks
    title: The types of tasks
  - local: unit1/exp-exp-tradeoff
    title: The Exploration/Exploitation tradeoff
  - local: unit1/two-methods
    title: The two main approaches for solving RL problems
  - local: unit1/deep-rl
    title: The “Deep” in Deep Reinforcement Learning
  - local: unit1/summary
    title: Summary
  - local: unit1/glossary
    title: Glossary
  - local: unit1/hands-on
    title: Hands-on
  - local: unit1/quiz
    title: Quiz
  - local: unit1/conclusion
    title: Conclusion
  - local: unit1/additional-readings
    title: Additional Readings
- title: Bonus Unit 1. Introduction to Deep Reinforcement Learning with Huggy
  sections:
  - local: unitbonus1/introduction
    title: Introduction
  - local: unitbonus1/how-huggy-works
    title: How Huggy works
  - local: unitbonus1/train
    title: Train Huggy
  - local: unitbonus1/play
    title: Play with Huggy
  - local: unitbonus1/conclusion
    title: Conclusion
- title: Live 1. How the course works, Q&A, and playing with Huggy
  sections:
  - local: live1/live1
    title: Live 1. How the course works, Q&A, and playing with Huggy 🐶
- title: Unit 2. Introduction to Q-Learning
  sections:
  - local: unit2/introduction
    title: Introduction
  - local: unit2/what-is-rl
    title: What is RL? A short recap
  - local: unit2/two-types-value-based-methods
    title: The two types of value-based methods
  - local: unit2/bellman-equation
    title: "The Bellman Equation: simplify our value estimation"
  - local: unit2/mc-vs-td
    title: Monte Carlo vs Temporal Difference Learning
  - local: unit2/mid-way-recap
    title: Mid-way Recap
  - local: unit2/mid-way-quiz
    title: Mid-way Quiz
  - local: unit2/q-learning
    title: Introducing Q-Learning
  - local: unit2/q-learning-example
    title: A Q-Learning example
  - local: unit2/q-learning-recap
    title: Q-Learning Recap
  - local: unit2/glossary
    title: Glossary
  - local: unit2/hands-on
    title: Hands-on
  - local: unit2/quiz2
    title: Q-Learning Quiz
  - local: unit2/conclusion
    title: Conclusion
  - local: unit2/additional-readings
    title: Additional Readings
- title: Unit 3. Deep Q-Learning with Atari Games
  sections:
  - local: unit3/introduction
    title: Introduction
  - local: unit3/from-q-to-dqn
    title: From Q-Learning to Deep Q-Learning
  - local: unit3/deep-q-network
    title: The Deep Q-Network (DQN)
  - local: unit3/deep-q-algorithm
    title: The Deep Q Algorithm
  - local: unit3/glossary
    title: Glossary
  - local: unit3/hands-on
    title: Hands-on
  - local: unit3/quiz
    title: Quiz
  - local: unit3/conclusion
    title: Conclusion
  - local: unit3/additional-readings
    title: Additional Readings
- title: Bonus Unit 2. Automatic Hyperparameter Tuning with Optuna
  sections:
  - local: unitbonus2/introduction
    title: Introduction
  - local: unitbonus2/optuna
    title: Optuna
  - local: unitbonus2/hands-on
    title: Hands-on
- title: Unit 4. Policy Gradient with PyTorch
  sections:
  - local: unit4/introduction
    title: Introduction
  - local: unit4/what-are-policy-based-methods
    title: What are the policy-based methods?
  - local: unit4/advantages-disadvantages
    title: The advantages and disadvantages of policy-gradient methods
  - local: unit4/policy-gradient
    title: Diving deeper into policy-gradient
  - local: unit4/pg-theorem
    title: (Optional) The Policy Gradient Theorem
  - local: unit4/glossary
    title: Glossary
  - local: unit4/hands-on
    title: Hands-on
  - local: unit4/quiz
    title: Quiz
  - local: unit4/conclusion
    title: Conclusion
  - local: unit4/additional-readings
    title: Additional Readings
- title: Unit 5. Introduction to Unity ML-Agents
  sections:
  - local: unit5/introduction
    title: Introduction
  - local: unit5/how-mlagents-works
    title: How ML-Agents works
  - local: unit5/snowball-target
    title: The SnowballTarget environment
  - local: unit5/pyramids
    title: The Pyramids environment
  - local: unit5/curiosity
    title: (Optional) What is curiosity in Deep Reinforcement Learning?
  - local: unit5/hands-on
    title: Hands-on
  - local: unit5/bonus
    title: Bonus. Learn to create your own environments with Unity and ML-Agents
  - local: unit5/quiz
    title: Quiz
  - local: unit5/conclusion
    title: Conclusion
- title: Unit 6. Actor-Critic methods with Robotics environments
  sections:
  - local: unit6/introduction
    title: Introduction
  - local: unit6/variance-problem
    title: The Problem of Variance in Reinforce
  - local: unit6/advantage-actor-critic
    title: Advantage Actor-Critic (A2C)
  - local: unit6/hands-on
    title: Advantage Actor-Critic (A2C) using Robotics Simulations with Panda-Gym 🤖
  - local: unit6/quiz
    title: Quiz
  - local: unit6/conclusion
    title: Conclusion
  - local: unit6/additional-readings
    title: Additional Readings
- title: Unit 7. Introduction to Multi-Agent RL and AI vs. AI
  sections:
  - local: unit7/introduction
    title: Introduction
  - local: unit7/introduction-to-marl
    title: An introduction to Multi-Agent Reinforcement Learning (MARL)
  - local: unit7/multi-agent-setting
    title: Designing Multi-Agent systems
  - local: unit7/self-play
    title: Self-Play
  - local: unit7/hands-on
    title: Let's train our soccer team to beat your classmates' teams (AI vs. AI)
  - local: unit7/quiz
    title: Quiz
  - local: unit7/conclusion
    title: Conclusion
  - local: unit7/additional-readings
    title: Additional Readings
- title: Unit 8. Part 1. Proximal Policy Optimization (PPO)
  sections:
  - local: unit8/introduction
    title: Introduction
  - local: unit8/intuition-behind-ppo
    title: The intuition behind PPO
  - local: unit8/clipped-surrogate-objective
    title: Introducing the Clipped Surrogate Objective Function
  - local: unit8/visualize
    title: Visualize the Clipped Surrogate Objective Function
  - local: unit8/hands-on-cleanrl
    title: PPO with CleanRL
  - local: unit8/conclusion
    title: Conclusion
  - local: unit8/additional-readings
    title: Additional Readings
- title: Unit 8. Part 2. Proximal Policy Optimization (PPO) with Doom
  sections:
  - local: unit8/introduction-sf
    title: Introduction
  - local: unit8/hands-on-sf
    title: PPO with Sample Factory and Doom
  - local: unit8/conclusion-sf
    title: Conclusion
- title: Bonus Unit 3. Advanced Topics in Reinforcement Learning
  sections:
  - local: unitbonus3/introduction
    title: Introduction
  - local: unitbonus3/model-based
    title: Model-Based Reinforcement Learning
  - local: unitbonus3/offline-online
    title: Offline vs. Online Reinforcement Learning
  - local: unitbonus3/generalisation
    title: Generalisation in Reinforcement Learning
  - local: unitbonus3/rlhf
    title: Reinforcement Learning from Human Feedback
  - local: unitbonus3/decision-transformers
    title: Decision Transformers and Offline RL
  - local: unitbonus3/language-models
    title: Language models in RL
  - local: unitbonus3/curriculum-learning
    title: (Automatic) Curriculum Learning for RL
  - local: unitbonus3/envs-to-try
    title: Interesting environments to try
  - local: unitbonus3/learning-agents
    title: An introduction to Unreal Learning Agents
  - local: unitbonus3/godotrl
    title: An Introduction to Godot RL
  - local: unitbonus3/student-works
    title: Student projects
  - local: unitbonus3/rl-documentation
    title: Brief introduction to RL documentation
- title: Bonus Unit 5. Imitation Learning with Godot RL Agents
  sections:
  - local: unitbonus5/introduction
    title: Introduction
  - local: unitbonus5/the-environment
    title: The environment
  - local: unitbonus5/getting-started
    title: Getting started
  - local: unitbonus5/train-our-robot
    title: Train our robot
  - local: unitbonus5/customize-the-environment
    title: (Optional) Customize the environment
  - local: unitbonus5/conclusion
    title: Conclusion
- title: Certification and congratulations
  sections:
  - local: communication/conclusion
    title: Congratulations
  - local: communication/certification
    title: Get your certificate of completion
deep-rl-class/units/en/_toctree.yml/0
{ "file_path": "deep-rl-class/units/en/_toctree.yml", "repo_id": "deep-rl-class", "token_count": 2897 }
90
# Summary [[summary]]

That was a lot of information! Let's summarize:

- Reinforcement Learning is a computational approach to learning from actions. We build an agent that learns from the environment **by interacting with it through trial and error** and receiving rewards (negative or positive) as feedback.

- The goal of any RL agent is to maximize its expected cumulative reward (also called expected return) because RL is based on the **reward hypothesis**, which is that **all goals can be described as the maximization of the expected cumulative reward.**

- The RL process is a loop that outputs a sequence of **state, action, reward and next state.**

- To calculate the expected cumulative reward (expected return), we discount the rewards: the rewards that come sooner (at the beginning of the game) **are more probable to happen since they are more predictable than the long-term future reward.**

- To solve an RL problem, you want to **find an optimal policy**. The policy is the "brain" of your agent, which will tell us **what action to take given a state.** The optimal policy is the one which **gives you the actions that maximize the expected return.**

- There are two ways to find your optimal policy:
  1. By training your policy directly: **policy-based methods.**
  2. By training a value function that tells us the expected return the agent will get at each state and using this function to define our policy: **value-based methods.**

- Finally, we speak about Deep RL because we introduce **deep neural networks to estimate the action to take (policy-based) or to estimate the value of a state (value-based)**, hence the name "deep".
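To make the discounting bullet concrete, here is a minimal sketch (not part of the course notebooks; the reward list and gamma value are made up for illustration):

```python
# Compute the discounted return G_t = r_{t+1} + gamma * r_{t+2} + gamma^2 * r_{t+3} + ...
rewards = [1.0, 1.0, 1.0, 1.0]  # hypothetical rewards collected during an episode
gamma = 0.95                    # discount rate: rewards further in the future count less

discounted_return = sum(gamma**k * r for k, r in enumerate(rewards))
print(discounted_return)  # 1 + 0.95 + 0.95**2 + 0.95**3 ≈ 3.71
```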
deep-rl-class/units/en/unit1/summary.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/summary.mdx", "repo_id": "deep-rl-class", "token_count": 382 }
91
# Second Quiz [[quiz2]]

The best way to learn and [to avoid the illusion of competence](https://www.coursera.org/lecture/learning-how-to-learn/illusions-of-competence-BuFzf) **is to test yourself.** This will help you to find **where you need to reinforce your knowledge**.

### Q1: What is Q-Learning?

<Question
  choices={[
    {
      text: "The algorithm we use to train our Q-function",
      explain: "",
      correct: true
    },
    {
      text: "A value function",
      explain: "It's an action-value function since it determines the value of being at a particular state and taking a specific action at that state",
    },
    {
      text: "An algorithm that determines the value of being at a particular state and taking a specific action at that state",
      explain: "The Q-function is the function that determines the value of being at a particular state and taking a specific action at that state.",
    },
    {
      text: "A table",
      explain: "Q-Learning is not a Q-table. The Q-function is the algorithm that will feed the Q-table."
    }
  ]}
/>

### Q2: What is a Q-table?

<Question
  choices={[
    {
      text: "An algorithm we use in Q-Learning",
      explain: "",
    },
    {
      text: "The Q-table is the internal memory of our agent",
      explain: "",
      correct: true
    },
    {
      text: "In a Q-table, each cell corresponds to a state value",
      explain: "Each cell corresponds to a state-action pair value, not a state value.",
    }
  ]}
/>

### Q3: Why does having an optimal Q-function Q* give us an optimal policy?

<details>
<summary>Solution</summary>

Because if we have an optimal Q-function, we have an optimal policy: we know, for each state, the best action to take.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/link-value-policy.jpg" alt="link value policy"/>

</details>

### Q4: Can you explain what the Epsilon-Greedy Strategy is?

<details>
<summary>Solution</summary>

The Epsilon-Greedy Strategy is a policy that handles the exploration/exploitation trade-off. The idea is that we define an epsilon ɛ = 1.0:

- With *probability 1 - ɛ*: we do exploitation (i.e., our agent selects the action with the highest state-action pair value).
- With *probability ɛ*: we do exploration (trying a random action).

(A minimal code sketch of this strategy, and of the Q-update from Q5, is given after the quiz.)

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Q-learning-4.jpg" alt="Epsilon Greedy"/>

</details>

### Q5: How do we update the Q-value of a state-action pair?

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-update-ex.jpg" alt="Q Update exercise"/>

<details>
<summary>Solution</summary>

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/q-update-solution.jpg" alt="Q Update exercise"/>

</details>

### Q6: What's the difference between on-policy and off-policy methods?

<details>
<summary>Solution</summary>

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/off-on-4.jpg" alt="On/off policy"/>

</details>

Congrats on finishing this Quiz 🥳! If you missed some elements, take time to read the chapter again to reinforce (😏) your knowledge.
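As promised in the Q4 solution, here is a minimal sketch of the epsilon-greedy strategy and of the Q-Learning update from Q5 (illustrative only; the state/action sizes and hyperparameter values are made up):

```python
import numpy as np

n_states, n_actions = 16, 4
Q = np.zeros((n_states, n_actions))     # the Q-table: one value per (state, action) pair
alpha, gamma, epsilon = 0.1, 0.99, 0.1  # learning rate, discount rate, exploration rate

def epsilon_greedy(state):
    if np.random.random() < epsilon:          # with probability epsilon: explore
        return np.random.randint(n_actions)   # random action
    return int(np.argmax(Q[state]))           # otherwise: exploit the best known action

def q_update(state, action, reward, next_state):
    # Q-Learning update (off-policy: the target uses the max over actions, i.e. a greedy policy)
    td_target = reward + gamma * np.max(Q[next_state])
    Q[state, action] += alpha * (td_target - Q[state, action])
```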
deep-rl-class/units/en/unit2/quiz2.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/quiz2.mdx", "repo_id": "deep-rl-class", "token_count": 1097 }
92
# Hands on

<CourseFloatingBanner classNames="absolute z-10 right-0 top-0"
notebooks={[
    {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit4/unit4.ipynb"}
    ]}
    askForHelpUrl="http://hf.co/join/discord" />

Now that we've studied the theory behind Reinforce, **you're ready to code your Reinforce agent with PyTorch**. And you'll test its robustness using CartPole-v1 and PixelCopter.

You'll then be able to iterate and improve this implementation for more advanced environments.

<figure class="image table text-center m-0 w-full">
  <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/>
</figure>

To validate this hands-on for the certification process, you need to push your trained models to the Hub and:

- Get a result of >= 350 for `CartPole-v1`
- Get a result of >= 5 for `PixelCopter`

To find your result, go to the leaderboard and find your model: **the result = mean_reward - std of reward**. **If you don't see your model on the leaderboard, go to the bottom of the leaderboard page and click the refresh button.**

For more information about the certification process, check this section 👉 https://huggingface.co/deep-rl-course/en/unit0/introduction#certification-process

And you can check your progress here 👉 https://huggingface.co/spaces/ThomasSimonini/Check-my-progress-Deep-RL-Course

**To start the hands-on, click on the Open In Colab button** 👇 :

[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/deep-rl-class/blob/main/notebooks/unit4/unit4.ipynb)

We strongly **recommend students use Google Colab for the hands-on exercises** instead of running them on their personal computers. By using Google Colab, **you can focus on learning and experimenting without worrying about the technical aspects** of setting up your environments.

# Unit 4: Code your first Deep Reinforcement Learning Algorithm with PyTorch: Reinforce. And test its robustness 💪

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/thumbnail.png" alt="thumbnail"/>

In this notebook, you'll code your first Deep Reinforcement Learning algorithm from scratch: Reinforce (also called Monte Carlo Policy Gradient).

Reinforce is a *Policy-based method*: a Deep Reinforcement Learning algorithm that tries **to optimize the policy directly without using an action-value function**.

More precisely, Reinforce is a *Policy-gradient method*, a subclass of *Policy-based methods* that aims **to optimize the policy directly by estimating the weights of the optimal policy using gradient ascent**.
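To keep the objective in mind while coding, the gradient Reinforce ascends can be written compactly as the classic Monte Carlo policy-gradient estimator (shown here for reference; the exact notation in Unit 4 may differ slightly):

\\( \nabla_\theta J(\theta) \approx \sum_{t=0}^{T} \nabla_\theta \log \pi_\theta(a_t|s_t) G_t \\)

where \\( G_t \\) is the discounted return from timestep \\( t \\). This is exactly what the training loop below implements: collect an episode, compute \\( G_t \\) for each step, then take a gradient step on \\( -\sum_t \log \pi_\theta(a_t|s_t) G_t \\).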
To test its robustness, we're going to train it in 2 different simple environments:

- CartPole-v1
- PixelCopter

⬇️ Here is an example of what **you will achieve at the end of this notebook.** ⬇️

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/envs.gif" alt="Environments"/>

### 🎮 Environments:

- [CartPole-v1](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)
- [PixelCopter](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)

### 📚 RL-Library:

- Python
- PyTorch

We're constantly trying to improve our tutorials, so **if you find some issues in this notebook**, please [open an issue on the GitHub Repo](https://github.com/huggingface/deep-rl-class/issues).

## Objectives of this notebook 🏆

At the end of the notebook, you will:

- Be able to **code a Reinforce algorithm from scratch using PyTorch.**
- Be able to **test the robustness of your agent using simple environments.**
- Be able to **push your trained agent to the Hub** with a nice video replay and an evaluation score 🔥.

## Prerequisites 🏗️

Before diving into the notebook, you need to:

🔲 📚 [Study Policy Gradients by reading Unit 4](https://huggingface.co/deep-rl-course/unit4/introduction)

# Let's code the Reinforce algorithm from scratch 🔥

## Some advice 💡

It's better to run this Colab on a copy in your Google Drive, so that **if it times out** you still have the saved notebook on your Google Drive and do not need to fill everything in from scratch.

To do that, you can either do `Ctrl + S` or `File > Save a copy in Google Drive.`

## Set the GPU 💪

- To **accelerate the agent's training, we'll use a GPU**. To do that, go to `Runtime > Change Runtime type`

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step1.jpg" alt="GPU Step 1">

- `Hardware Accelerator > GPU`

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/gpu-step2.jpg" alt="GPU Step 2">

## Create a virtual display 🖥

During the notebook, we'll need to generate a replay video. To do so, in Colab, **we need a virtual screen to be able to render the environment** (and thus record the frames).

The following cells install the libraries and create and run a virtual screen 🖥

```python
%%capture
!apt install python-opengl
!apt install ffmpeg
!apt install xvfb
!pip install pyvirtualdisplay
!pip install pyglet==1.5.1
```

```python
# Virtual display
from pyvirtualdisplay import Display

virtual_display = Display(visible=0, size=(1400, 900))
virtual_display.start()
```

## Install the dependencies 🔽

The first step is to install the dependencies. We'll install several:

- `gym`
- `gym-games`: Extra gym environments made with PyGame.
- `huggingface_hub`: The Hub works as a central place where anyone can share and explore models and datasets. It has versioning, metrics, visualizations, and other features that will allow you to easily collaborate with others.

You may be wondering why we install `gym` and not `gymnasium`, a more recent version of gym. **It's because the gym-games we are using have not been updated to gymnasium yet.**

The differences you'll encounter here:

- In `gym`, we don't have `terminated` and `truncated`, but only `done`.
- In `gym`, `env.step()` returns `state, reward, done, info`

You can learn more about the differences between Gym and Gymnasium here 👉 https://gymnasium.farama.org/content/migration-guide/

You can see all the Reinforce models available here 👉 https://huggingface.co/models?other=reinforce

And you can find all the Deep Reinforcement Learning models here 👉 https://huggingface.co/models?pipeline_tag=reinforcement-learning

```bash
!pip install -r https://raw.githubusercontent.com/huggingface/deep-rl-class/main/notebooks/unit4/requirements-unit4.txt
```

## Import the packages 📦

In addition to importing the installed libraries, we also import:

- `imageio`: A library that will help us generate a replay video

```python
import numpy as np

from collections import deque

import matplotlib.pyplot as plt
%matplotlib inline

# PyTorch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical

# Gym
import gym
import gym_pygame

# Hugging Face Hub
from huggingface_hub import notebook_login  # To log in to our Hugging Face account so we can upload models to the Hub.
import imageio
```

## Check if we have a GPU

- Let's check if we have a GPU
- If so, you should see `device:cuda:0`

```python
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
```

```python
print(device)
```

We're now ready to implement our Reinforce algorithm 🔥

# First agent: Playing CartPole-v1 🤖

## Create the CartPole environment and understand how it works

### [The environment 🎮](https://www.gymlibrary.dev/environments/classic_control/cart_pole/)

### Why do we use a simple environment like CartPole-v1?

As explained in [Reinforcement Learning Tips and Tricks](https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html), when you implement your agent from scratch, you need **to be sure that it works correctly and to find bugs with easy environments before going deeper**, as finding bugs is much easier in simple environments.

> Try to have some "sign of life" on toy problems.

> Validate the implementation by making it run on harder and harder envs (you can compare results against the RL zoo). You usually need to run hyperparameter optimization for that step.

### The CartPole-v1 environment

> A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces in the left and right direction on the cart.

So, we start with CartPole-v1. The goal is to push the cart left or right **so that the pole stays in equilibrium.**

The episode ends if:

- The pole angle is greater than ±12°
- The cart position is greater than ±2.4
- The episode length is greater than 500

We get a reward 💰 of +1 every timestep the pole stays in equilibrium.
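Before we build the agent, here's a quick, optional sanity check of the environment using the classic `gym` step API described above (a minimal sketch, not part of the graded notebook; a random agent, so expect a low return):

```python
import gym

env = gym.make("CartPole-v1")
state = env.reset()      # classic gym returns only the observation
done = False
total_reward = 0

while not done:
    action = env.action_space.sample()            # random action
    state, reward, done, info = env.step(action)  # 4-tuple: no terminated/truncated split
    total_reward += reward

print(f"Random agent return: {total_reward}")
env.close()
```

Now let's create the training and evaluation environments we'll actually use: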
```python
env_id = "CartPole-v1"
# Create the env
env = gym.make(env_id)

# Create the evaluation env
eval_env = gym.make(env_id)

# Get the state space and action space
s_size = env.observation_space.shape[0]
a_size = env.action_space.n
```

```python
print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample())  # Get a random observation
```

```python
print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample())  # Take a random action
```

## Let's build the Reinforce Architecture

This implementation is based on three existing implementations:

- [PyTorch official Reinforcement Learning example](https://github.com/pytorch/examples/blob/main/reinforcement_learning/reinforce.py)
- [Udacity Reinforce](https://github.com/udacity/deep-reinforcement-learning/blob/master/reinforce/REINFORCE.ipynb)
- [Improvement of the integration by Chris1nexus](https://github.com/huggingface/deep-rl-class/pull/95)

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/reinforce.png" alt="Reinforce"/>

So we want:

- Two fully connected layers (fc1 and fc2).
- ReLU as the activation function of fc1.
- Softmax to output a probability distribution over actions.

```python
class Policy(nn.Module):
    def __init__(self, s_size, a_size, h_size):
        super(Policy, self).__init__()
        # Create two fully connected layers

    def forward(self, x):
        # Define the forward pass
        # state goes to fc1, then we apply the ReLU activation function
        # fc1 output goes to fc2
        # We output the softmax

    def act(self, state):
        """
        Given a state, take an action
        """
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        probs = self.forward(state).cpu()
        m = Categorical(probs)
        action = np.argmax(m)
        return action.item(), m.log_prob(action)
```

### Solution

```python
class Policy(nn.Module):
    def __init__(self, s_size, a_size, h_size):
        super(Policy, self).__init__()
        self.fc1 = nn.Linear(s_size, h_size)
        self.fc2 = nn.Linear(h_size, a_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.softmax(x, dim=1)

    def act(self, state):
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        probs = self.forward(state).cpu()
        m = Categorical(probs)
        action = np.argmax(m)
        return action.item(), m.log_prob(action)
```

I made a mistake. Can you guess where?

- To find out, let's make a forward pass:

```python
debug_policy = Policy(s_size, a_size, 64).to(device)
debug_policy.act(env.reset())
```

- Here we see that the error says `ValueError: The value argument to log_prob must be a Tensor`
- It means that `action` in `m.log_prob(action)` must be a Tensor, **but it's not.**
- Do you know why? Check the `act` function and try to see why it does not work.

Advice 💡: Something is wrong in this implementation. Remember that for the `act` function **we want to sample an action from the probability distribution over actions**.
### (Real) Solution

```python
class Policy(nn.Module):
    def __init__(self, s_size, a_size, h_size):
        super(Policy, self).__init__()
        self.fc1 = nn.Linear(s_size, h_size)
        self.fc2 = nn.Linear(h_size, a_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.softmax(x, dim=1)

    def act(self, state):
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        probs = self.forward(state).cpu()
        m = Categorical(probs)
        action = m.sample()
        return action.item(), m.log_prob(action)
```

By using CartPole, it was easier to debug, since **we know that the bug comes from our integration and not from our simple environment**.

- Since **we want to sample an action from the probability distribution over actions**, we can't use `action = np.argmax(m)`, since it would always output the action with the highest probability.
- We need to replace this with `action = m.sample()`, which samples an action from the probability distribution P(.|s).

### Let's build the Reinforce Training Algorithm

This is the Reinforce algorithm pseudocode:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/pg_pseudocode.png" alt="Policy gradient pseudocode"/>

- When we calculate the return Gt (line 6), we see that we calculate the sum of discounted rewards **starting at timestep t**.

- Why? Because our policy should only **reinforce actions on the basis of their consequences**: rewards obtained before taking an action are useless (since they were not caused by the action); **only the ones that come after the action matter**.

- Before coding this, you should read the section [don't let the past distract you](https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html#don-t-let-the-past-distract-you), which explains why we use the reward-to-go policy gradient.

We use an interesting technique coded by [Chris1nexus](https://github.com/Chris1nexus) to **compute the return at each timestep efficiently**. The comments explain the procedure; also don't hesitate [to check the PR explanation](https://github.com/huggingface/deep-rl-class/pull/95).

The second question you may ask is: **why do we minimize the loss**? Didn't we talk about Gradient Ascent, not Gradient Descent, earlier?

- We want to maximize our utility function \\( J(\theta) \\), but in PyTorch and TensorFlow, it's better to **minimize an objective function.**
- So let's say we want to reinforce action 3 at a certain timestep. Before training, this action's probability P is 0.25.
- So we want to modify \\( \theta \\) such that \\( \pi_\theta(a_3|s; \theta) > 0.25 \\).
- Because all probabilities must sum to 1, maximizing \\( \pi_\theta(a_3|s; \theta) \\) will **minimize the other action probabilities.**
- So we should tell PyTorch **to minimize \\( 1 - \pi_\theta(a_3|s; \theta) \\).**
- This loss function approaches 0 as \\( \pi_\theta(a_3|s; \theta) \\) nears 1.
- So minimizing this loss encourages the gradient to maximize \\( \pi_\theta(a_3|s; \theta) \\).

```python
def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every):
    # Help us to calculate the score during the training
    scores_deque = deque(maxlen=100)
    scores = []
    # Line 3 of pseudocode
    for i_episode in range(1, n_training_episodes + 1):
        saved_log_probs = []
        rewards = []
        state = # TODO: reset the environment
        # Line 4 of pseudocode
        for t in range(max_t):
            action, log_prob = # TODO: get the action
            saved_log_probs.append(log_prob)
            state, reward, done, _ = # TODO: take an env step
            rewards.append(reward)
            if done:
                break
        scores_deque.append(sum(rewards))
        scores.append(sum(rewards))

        # Line 6 of pseudocode: calculate the return
        returns = deque(maxlen=max_t)
        n_steps = len(rewards)
        # Compute the discounted returns at each timestep,
        # as the sum of the reward at time t and the gamma-discounted return at time t+1,
        # in O(N) time, where N is the number of time steps
        # (this definition of the discounted return G_t follows the definition of this quantity
        # shown at page 44 of Sutton&Barto 2017 2nd draft)
        # G_t = r_(t+1) + gamma*r_(t+2) + gamma^2*r_(t+3) + ...

        # Given this formulation, the returns at each timestep t can be computed
        # by re-using the computed future returns G_(t+1) to compute the current return G_t
        # G_t = r_(t+1) + gamma*G_(t+1)
        # G_(t-1) = r_t + gamma*G_t
        # (this follows a dynamic programming approach, with which we memorize solutions in order
        # to avoid computing them multiple times)

        # This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft)
        # G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ...

        ## Given the above, we calculate the return at timestep t in code indexing as:
        #    returns[t] = rewards[t] + gamma * returns[t+1]
        ## We compute this starting from the last timestep to the first, in order
        ## to employ the formula presented above and avoid redundant computations that would be needed
        ## if we were to do it from first to last.

        ## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps,
        ## thanks to the appendleft() function, which allows appending at position 0 in constant time O(1);
        ## a normal python list would instead require O(N) to do this.
        for t in range(n_steps)[::-1]:
            disc_return_t = returns[0] if len(returns) > 0 else 0
            returns.appendleft(  ) # TODO: complete here

        ## standardization of the returns is employed to make training more stable
        eps = np.finfo(np.float32).eps.item()
        ## eps is the smallest representable float, which is
        # added to the standard deviation of the returns to avoid numerical instabilities
        returns = torch.tensor(returns)
        returns = (returns - returns.mean()) / (returns.std() + eps)

        # Line 7:
        policy_loss = []
        for log_prob, disc_return in zip(saved_log_probs, returns):
            policy_loss.append(-log_prob * disc_return)
        policy_loss = torch.cat(policy_loss).sum()

        # Line 8: PyTorch prefers gradient descent
        optimizer.zero_grad()
        policy_loss.backward()
        optimizer.step()

        if i_episode % print_every == 0:
            print("Episode {}\tAverage Score: {:.2f}".format(i_episode, np.mean(scores_deque)))

    return scores
```

#### Solution

```python
def reinforce(policy, optimizer, n_training_episodes, max_t, gamma, print_every):
    # Help us to calculate the score during the training
    scores_deque = deque(maxlen=100)
    scores = []
    # Line 3 of pseudocode
    for i_episode in range(1, n_training_episodes + 1):
        saved_log_probs = []
        rewards = []
        state = env.reset()
        # Line 4 of pseudocode
        for t in range(max_t):
            action, log_prob = policy.act(state)
            saved_log_probs.append(log_prob)
            state, reward, done, _ = env.step(action)
            rewards.append(reward)
            if done:
                break
        scores_deque.append(sum(rewards))
        scores.append(sum(rewards))

        # Line 6 of pseudocode: calculate the return
        returns = deque(maxlen=max_t)
        n_steps = len(rewards)
        # Compute the discounted returns at each timestep,
        # as the sum of the reward at time t and the gamma-discounted return at time t+1,
        # in O(N) time, where N is the number of time steps
        # (this definition of the discounted return G_t follows the definition of this quantity
        # shown at page 44 of Sutton&Barto 2017 2nd draft)
        # G_t = r_(t+1) + gamma*r_(t+2) + gamma^2*r_(t+3) + ...

        # Given this formulation, the returns at each timestep t can be computed
        # by re-using the computed future returns G_(t+1) to compute the current return G_t
        # G_t = r_(t+1) + gamma*G_(t+1)
        # G_(t-1) = r_t + gamma*G_t
        # (this follows a dynamic programming approach, with which we memorize solutions in order
        # to avoid computing them multiple times)

        # This is correct since the above is equivalent to (see also page 46 of Sutton&Barto 2017 2nd draft)
        # G_(t-1) = r_t + gamma*r_(t+1) + gamma*gamma*r_(t+2) + ...

        ## Given the above, we calculate the return at timestep t in code indexing as:
        #    returns[t] = rewards[t] + gamma * returns[t+1]
        ## We compute this starting from the last timestep to the first, in order
        ## to employ the formula presented above and avoid redundant computations that would be needed
        ## if we were to do it from first to last.

        ## Hence, the queue "returns" will hold the returns in chronological order, from t=0 to t=n_steps,
        ## thanks to the appendleft() function, which allows appending at position 0 in constant time O(1);
        ## a normal python list would instead require O(N) to do this.
        for t in range(n_steps)[::-1]:
            disc_return_t = returns[0] if len(returns) > 0 else 0
            returns.appendleft(gamma * disc_return_t + rewards[t])

        ## standardization of the returns is employed to make training more stable
        eps = np.finfo(np.float32).eps.item()
        ## eps is the smallest representable float, which is
        # added to the standard deviation of the returns to avoid numerical instabilities
        returns = torch.tensor(returns)
        returns = (returns - returns.mean()) / (returns.std() + eps)

        # Line 7:
        policy_loss = []
        for log_prob, disc_return in zip(saved_log_probs, returns):
            policy_loss.append(-log_prob * disc_return)
        policy_loss = torch.cat(policy_loss).sum()

        # Line 8: PyTorch prefers gradient descent
        optimizer.zero_grad()
        policy_loss.backward()
        optimizer.step()

        if i_episode % print_every == 0:
            print("Episode {}\tAverage Score: {:.2f}".format(i_episode, np.mean(scores_deque)))

    return scores
```

## Train it

- We're now ready to train our agent.
- But first, we define a variable containing all the training hyperparameters.
- You can change the training parameters (and should 😉).

```python
cartpole_hyperparameters = {
    "h_size": 16,
    "n_training_episodes": 1000,
    "n_evaluation_episodes": 10,
    "max_t": 1000,
    "gamma": 1.0,
    "lr": 1e-2,
    "env_id": env_id,
    "state_space": s_size,
    "action_space": a_size,
}
```

```python
# Create the policy and place it on the device
cartpole_policy = Policy(
    cartpole_hyperparameters["state_space"],
    cartpole_hyperparameters["action_space"],
    cartpole_hyperparameters["h_size"],
).to(device)
cartpole_optimizer = optim.Adam(cartpole_policy.parameters(), lr=cartpole_hyperparameters["lr"])
```

```python
scores = reinforce(
    cartpole_policy,
    cartpole_optimizer,
    cartpole_hyperparameters["n_training_episodes"],
    cartpole_hyperparameters["max_t"],
    cartpole_hyperparameters["gamma"],
    100,
)
```

## Define evaluation method 📝

- Here we define the evaluation method that we're going to use to test our Reinforce agent.

```python
def evaluate_agent(env, max_steps, n_eval_episodes, policy):
    """
    Evaluate the agent for ``n_eval_episodes`` episodes and return the average reward and std of reward.
    :param env: The evaluation environment
    :param max_steps: Maximum number of steps per episode
    :param n_eval_episodes: Number of episodes to evaluate the agent
    :param policy: The Reinforce agent
    """
    episode_rewards = []
    for episode in range(n_eval_episodes):
        state = env.reset()
        step = 0
        done = False
        total_rewards_ep = 0

        for step in range(max_steps):
            action, _ = policy.act(state)
            new_state, reward, done, info = env.step(action)
            total_rewards_ep += reward

            if done:
                break
            state = new_state
        episode_rewards.append(total_rewards_ep)
    mean_reward = np.mean(episode_rewards)
    std_reward = np.std(episode_rewards)

    return mean_reward, std_reward
```

## Evaluate our agent 📈

```python
evaluate_agent(
    eval_env, cartpole_hyperparameters["max_t"], cartpole_hyperparameters["n_evaluation_episodes"], cartpole_policy
)
```

### Publish our trained model on the Hub 🔥

Now that we've seen good results after training, we can publish our trained model on the Hub 🤗 with one line of code.
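As a quick sanity check before pushing, you can compute the leaderboard score mentioned at the start of this unit, `mean_reward - std_reward` (a small illustrative sketch reusing the values returned by the evaluation cell above):

```python
mean_reward, std_reward = evaluate_agent(
    eval_env, cartpole_hyperparameters["max_t"], cartpole_hyperparameters["n_evaluation_episodes"], cartpole_policy
)
leaderboard_score = mean_reward - std_reward  # this is the value the leaderboard displays
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f} -> score={leaderboard_score:.2f}")
```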
Here's an example of a Model Card:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit6/modelcard.png"/>

### Push to the Hub

#### Do not modify this code

```python
from huggingface_hub import HfApi, snapshot_download
from huggingface_hub.repocard import metadata_eval_result, metadata_save

from pathlib import Path
import datetime
import json
import imageio

import tempfile

import os
```

```python
def record_video(env, policy, out_directory, fps=30):
    """
    Generate a replay video of the agent
    :param env: environment to record in
    :param policy: the policy of our agent
    :param out_directory: path of the output video
    :param fps: how many frames per second (with taxi-v3 and frozenlake-v1 we use 1)
    """
    images = []
    done = False
    state = env.reset()
    img = env.render(mode="rgb_array")
    images.append(img)
    while not done:
        # Take an action given the current state using our trained policy
        action, _ = policy.act(state)
        state, reward, done, info = env.step(action)  # We directly put next_state = state for recording logic
        img = env.render(mode="rgb_array")
        images.append(img)
    imageio.mimsave(out_directory, [np.array(img) for i, img in enumerate(images)], fps=fps)
```

```python
def push_to_hub(repo_id,
                model,
                hyperparameters,
                eval_env,
                video_fps=30
                ):
    """
    Evaluate, generate a video and upload a model to the Hugging Face Hub.
    This method does the complete pipeline:
    - It evaluates the model
    - It generates the model card
    - It generates a replay video of the agent
    - It pushes everything to the Hub

    :param repo_id: id of the model repository from the Hugging Face Hub
    :param model: the pytorch model we want to save
    :param hyperparameters: training hyperparameters
    :param eval_env: evaluation environment
    :param video_fps: how many frames per second to record in our video replay
    """

    _, repo_name = repo_id.split("/")
    api = HfApi()

    # Step 1: Create the repo
    repo_url = api.create_repo(
        repo_id=repo_id,
        exist_ok=True,
    )

    with tempfile.TemporaryDirectory() as tmpdirname:
        local_directory = Path(tmpdirname)

        # Step 2: Save the model
        torch.save(model, local_directory / "model.pt")

        # Step 3: Save the hyperparameters to JSON
        with open(local_directory / "hyperparameters.json", "w") as outfile:
            json.dump(hyperparameters, outfile)

        # Step 4: Evaluate the model and build JSON
        mean_reward, std_reward = evaluate_agent(eval_env,
                                                 hyperparameters["max_t"],
                                                 hyperparameters["n_evaluation_episodes"],
                                                 model)
        # Get datetime
        eval_datetime = datetime.datetime.now()
        eval_form_datetime = eval_datetime.isoformat()

        evaluate_data = {
            "env_id": hyperparameters["env_id"],
            "mean_reward": mean_reward,
            "n_evaluation_episodes": hyperparameters["n_evaluation_episodes"],
            "eval_datetime": eval_form_datetime,
        }

        # Write a JSON file
        with open(local_directory / "results.json", "w") as outfile:
            json.dump(evaluate_data, outfile)

        # Step 5: Create the model card
        env_name = hyperparameters["env_id"]

        metadata = {}
        metadata["tags"] = [
            env_name,
            "reinforce",
            "reinforcement-learning",
            "custom-implementation",
            "deep-rl-class"
        ]

        # Add metrics
        eval = metadata_eval_result(
            model_pretty_name=repo_name,
            task_pretty_name="reinforcement-learning",
            task_id="reinforcement-learning",
            metrics_pretty_name="mean_reward",
            metrics_id="mean_reward",
            metrics_value=f"{mean_reward:.2f} +/- {std_reward:.2f}",
            dataset_pretty_name=env_name,
            dataset_id=env_name,
        )

        # Merges both dictionaries
        metadata = {**metadata, **eval}

        model_card = f"""
# **Reinforce** Agent playing **{env_name}**

This is a trained model of a **Reinforce** agent playing **{env_name}**.
To learn to use this model and train yours, check Unit 4 of the Deep Reinforcement Learning Course: https://huggingface.co/deep-rl-course/unit4/introduction
"""

        readme_path = local_directory / "README.md"
        readme = ""
        if readme_path.exists():
            with readme_path.open("r", encoding="utf8") as f:
                readme = f.read()
        else:
            readme = model_card

        with readme_path.open("w", encoding="utf-8") as f:
            f.write(readme)

        # Save our metrics to Readme metadata
        metadata_save(readme_path, metadata)

        # Step 6: Record a video (in the evaluation environment)
        video_path = local_directory / "replay.mp4"
        record_video(eval_env, model, video_path, video_fps)

        # Step 7. Push everything to the Hub
        api.upload_folder(
            repo_id=repo_id,
            folder_path=local_directory,
            path_in_repo=".",
        )

        print(f"Your model is pushed to the Hub. You can view your model here: {repo_url}")
```

By using `push_to_hub`, **you evaluate, record a replay, generate a model card of your agent, and push it to the Hub**.

This way:

- You can **showcase your work** 🔥
- You can **visualize your agent playing** 👀
- You can **share an agent with the community that others can use** 💾
- You can **access a leaderboard 🏆 to see how well your agent is performing compared to your classmates'** 👉 https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard

To be able to share your model with the community, there are three more steps to follow:

1️⃣ (If it's not already done) create an account on HF ➡ https://huggingface.co/join

2️⃣ Sign in and store your authentication token from the Hugging Face website.

- Create a new token (https://huggingface.co/settings/tokens) **with write role**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/notebooks/create-token.jpg" alt="Create HF Token">

```python
notebook_login()
```

If you don't want to use Google Colab or a Jupyter Notebook, you need to use this command instead: `huggingface-cli login` (or `login`)

3️⃣ We're now ready to push our trained agent to the 🤗 Hub 🔥 using the `push_to_hub()` function.

```python
repo_id = ""  # TODO Define your repo id {username/Reinforce-{model-id}}
push_to_hub(
    repo_id,
    cartpole_policy,  # The model we want to save
    cartpole_hyperparameters,  # Hyperparameters
    eval_env,  # Evaluation environment
    video_fps=30
)
```

Now that we've tested the robustness of our implementation, let's try a more complex environment: PixelCopter 🚁

## Second agent: PixelCopter 🚁

### Study the PixelCopter environment 👀

- [The Environment documentation](https://pygame-learning-environment.readthedocs.io/en/latest/user/games/pixelcopter.html)

```python
env_id = "Pixelcopter-PLE-v0"
env = gym.make(env_id)
eval_env = gym.make(env_id)
s_size = env.observation_space.shape[0]
a_size = env.action_space.n
```

```python
print("_____OBSERVATION SPACE_____ \n")
print("The State Space is: ", s_size)
print("Sample observation", env.observation_space.sample())  # Get a random observation
```

```python
print("\n _____ACTION SPACE_____ \n")
print("The Action Space is: ", a_size)
print("Action Space Sample", env.action_space.sample())  # Take a random action
```

The observation space (7) 👀:

- player y position
- player velocity
- player distance to floor
- player distance to ceiling
- next block x distance to player
- next block's top y location
- next block's bottom y location

The action space (2) 🎮:

- Up (press accelerator)
- Do nothing (don't press accelerator)

The reward function 💰:

- For each vertical block it passes, it gains a positive reward
of +1. Each time a terminal state is reached, it receives a negative reward of -1.

### Define the new Policy 🧠

- We need a deeper neural network, since the environment is more complex.

```python
class Policy(nn.Module):
    def __init__(self, s_size, a_size, h_size):
        super(Policy, self).__init__()
        # Define the three layers here

    def forward(self, x):
        # Define the forward process here
        return F.softmax(x, dim=1)

    def act(self, state):
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        probs = self.forward(state).cpu()
        m = Categorical(probs)
        action = m.sample()
        return action.item(), m.log_prob(action)
```

#### Solution

```python
class Policy(nn.Module):
    def __init__(self, s_size, a_size, h_size):
        super(Policy, self).__init__()
        self.fc1 = nn.Linear(s_size, h_size)
        self.fc2 = nn.Linear(h_size, h_size * 2)
        self.fc3 = nn.Linear(h_size * 2, a_size)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return F.softmax(x, dim=1)

    def act(self, state):
        state = torch.from_numpy(state).float().unsqueeze(0).to(device)
        probs = self.forward(state).cpu()
        m = Categorical(probs)
        action = m.sample()
        return action.item(), m.log_prob(action)
```

### Define the hyperparameters ⚙️

- Because this environment is more complex, we need more neurons, especially for the hidden size.

```python
pixelcopter_hyperparameters = {
    "h_size": 64,
    "n_training_episodes": 50000,
    "n_evaluation_episodes": 10,
    "max_t": 10000,
    "gamma": 0.99,
    "lr": 1e-4,
    "env_id": env_id,
    "state_space": s_size,
    "action_space": a_size,
}
```

### Train it

- We're now ready to train our agent 🔥.

```python
# Create the policy and place it on the device
# torch.manual_seed(50)
pixelcopter_policy = Policy(
    pixelcopter_hyperparameters["state_space"],
    pixelcopter_hyperparameters["action_space"],
    pixelcopter_hyperparameters["h_size"],
).to(device)
pixelcopter_optimizer = optim.Adam(pixelcopter_policy.parameters(), lr=pixelcopter_hyperparameters["lr"])
```

```python
scores = reinforce(
    pixelcopter_policy,
    pixelcopter_optimizer,
    pixelcopter_hyperparameters["n_training_episodes"],
    pixelcopter_hyperparameters["max_t"],
    pixelcopter_hyperparameters["gamma"],
    1000,
)
```

### Publish our trained model on the Hub 🔥

```python
repo_id = ""  # TODO Define your repo id {username/Reinforce-{model-id}}
push_to_hub(
    repo_id,
    pixelcopter_policy,  # The model we want to save
    pixelcopter_hyperparameters,  # Hyperparameters
    eval_env,  # Evaluation environment
    video_fps=30
)
```

## Some additional challenges 🏆

The best way to learn **is to try things on your own**! As you saw, the current agent is not doing great. As a first suggestion, you can train for more steps, but also try to find better parameters.

In the [Leaderboard](https://huggingface.co/spaces/huggingface-projects/Deep-Reinforcement-Learning-Leaderboard) you will find your agents. Can you get to the top?

Here are some ideas to climb up the leaderboard:

* Train for more steps
* Try different hyperparameters by looking at what your classmates have done 👉 https://huggingface.co/models?other=reinforce
* **Push your newly trained model** to the Hub 🔥
* **Improve the implementation for more complex environments** (for instance, what about changing the network to a Convolutional Neural Network to handle frames as observations?)

________________________________________________________________________

**Congrats on finishing this unit!** There was a lot of information. And congrats on finishing the tutorial.
You've just coded your first Deep Reinforcement Learning agent from scratch using PyTorch and shared it on the Hub 🥳.

Don't hesitate to iterate on this unit **by improving the implementation for more complex environments** (for instance, what about changing the network to a Convolutional Neural Network to handle frames as observations?).

In the next unit, **we're going to learn more about Unity ML-Agents** by training agents in Unity environments. This way, you will be ready to participate in the **AI vs AI challenges, where you'll train your agents to compete against other agents in a snowball fight and a soccer game.**

Sounds fun? See you next time!

Finally, we would love **to hear what you think of the course and how we can improve it**. If you have some feedback, please 👉 [fill out this form](https://forms.gle/BzKXWzLAGZESGNaE9)

See you in Unit 5! 🔥

### Keep Learning, stay awesome 🤗
deep-rl-class/units/en/unit4/hands-on.mdx/0
{ "file_path": "deep-rl-class/units/en/unit4/hands-on.mdx", "repo_id": "deep-rl-class", "token_count": 13495 }
93
# Advantage Actor-Critic (A2C) [[advantage-actor-critic]]

## Reducing variance with Actor-Critic methods

The solution to reducing the variance of the Reinforce algorithm and training our agent faster and better is to use a combination of Policy-Based and Value-Based methods: *the Actor-Critic method*.

To understand the Actor-Critic method, imagine you're playing a video game. You can play with a friend that will provide you with some feedback. You're the Actor and your friend is the Critic.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/ac.jpg" alt="Actor Critic"/>

You don't know how to play at the beginning, **so you try some actions randomly**. The Critic observes your actions and **provides feedback**.

Learning from this feedback, **you'll update your policy and be better at playing that game.**

On the other hand, your friend (Critic) will also update their own way of providing feedback so it can be better next time.

This is the idea behind Actor-Critic. We learn two function approximations:

- *A policy* that **controls how our agent acts**: \\( \pi_{\theta}(s) \\)
- *A value function* to assist the policy update by measuring how good the action taken is: \\( \hat{q}_{w}(s,a) \\)

## The Actor-Critic Process

Now that we have seen the Actor-Critic's big picture, let's dive deeper to understand how the Actor and Critic improve together during training.

As we saw, with Actor-Critic methods, there are two function approximations (two neural networks):

- *Actor*, a **policy function** parameterized by theta: \\( \pi_{\theta}(s) \\)
- *Critic*, a **value function** parameterized by w: \\( \hat{q}_{w}(s,a) \\)

Let's see the training process to understand how the Actor and Critic are optimized:

- At each timestep, t, we get the current state \\( S_t \\) from the environment and **pass it as input through our Actor and Critic**.

- Our Policy takes the state and **outputs an action** \\( A_t \\).

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step1.jpg" alt="Step 1 Actor Critic"/>

- The Critic takes that action as input as well and, using \\( S_t \\) and \\( A_t \\), **computes the value of taking that action at that state: the Q-value**.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step2.jpg" alt="Step 2 Actor Critic"/>

- The action \\( A_t \\) performed in the environment outputs a new state \\( S_{t+1} \\) and a reward \\( R_{t+1} \\).

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step3.jpg" alt="Step 3 Actor Critic"/>

- The Actor updates its policy parameters using the Q-value.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step4.jpg" alt="Step 4 Actor Critic"/>

- Thanks to its updated parameters, the Actor produces the next action to take, \\( A_{t+1} \\), given the new state \\( S_{t+1} \\).

- The Critic then updates its value parameters.

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/step5.jpg" alt="Step 5 Actor Critic"/>

## Adding Advantage in Actor-Critic (A2C)

We can stabilize learning further by **using the Advantage function as Critic instead of the Action value function**.
The idea is that the Advantage function calculates the relative advantage of an action compared to the other actions possible at a state: **how taking that action at a state is better compared to the average value of the state**. It subtracts the mean value of the state from the state-action pair value:

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/advantage1.jpg" alt="Advantage Function"/>

In other words, this function calculates **the extra reward we get if we take this action at that state compared to the mean reward we get at that state**.

The extra reward is what's beyond the expected value of that state.

- If A(s,a) > 0: our gradient is **pushed in that direction**.
- If A(s,a) < 0 (our action does worse than the average value of that state), **our gradient is pushed in the opposite direction**.

The problem with implementing this advantage function is that it requires two value functions, \\( Q(s,a) \\) and \\( V(s) \\). Fortunately, **we can use the TD error as a good estimator of the advantage function.**

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit8/advantage2.jpg" alt="Advantage Function"/>
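To make the TD-error formulation concrete, here is a minimal, illustrative sketch of one A2C update with a learned state-value critic \\( V(s) \\). This is not the course's reference implementation: the actor/critic interfaces and the value-loss coefficient are assumptions made for the example.

```python
import torch
import torch.nn.functional as F

# Assumed interfaces: actor(state) returns an action distribution (e.g. Categorical),
# critic(state) returns a scalar V(s) estimate; optimizer covers both networks.
def a2c_update(actor, critic, optimizer, state, action, reward, next_state, done, gamma=0.99):
    value = critic(state)                             # V(s_t)
    with torch.no_grad():
        next_value = critic(next_state) * (1 - done)  # V(s_{t+1}), zero if terminal
        td_target = reward + gamma * next_value
    advantage = (td_target - value).detach()          # TD error ≈ A(s_t, a_t)

    log_prob = actor(state).log_prob(action)          # log π_θ(a_t|s_t)
    actor_loss = -(log_prob * advantage)              # push the policy in the advantage's direction
    critic_loss = F.mse_loss(value, td_target)        # regress V(s_t) toward the TD target

    loss = actor_loss + 0.5 * critic_loss             # 0.5 is a common (here arbitrary) value coefficient
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
```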
deep-rl-class/units/en/unit6/advantage-actor-critic.mdx/0
{ "file_path": "deep-rl-class/units/en/unit6/advantage-actor-critic.mdx", "repo_id": "deep-rl-class", "token_count": 1384 }
94
# Conclusion

That's all for today. Congrats on finishing this Unit and the tutorial! ⭐️

Now that you've successfully trained your Doom agent, why not try deathmatch? Remember, that's a much more complex level than the one you've just trained on, **but it's a nice experiment, and I advise you to try it.**

If you do it, don't hesitate to share your model in the `#rl-i-made-this` channel on our [discord server](https://www.hf.co/join/discord).

This concludes the last unit, but we are not finished yet! 🤗 The following **bonus unit includes some of the most interesting, advanced, and cutting-edge work in Deep Reinforcement Learning**.

See you next time 🔥

## Keep Learning, Stay awesome 🤗
deep-rl-class/units/en/unit8/conclusion-sf.mdx/0
{ "file_path": "deep-rl-class/units/en/unit8/conclusion-sf.mdx", "repo_id": "deep-rl-class", "token_count": 188 }
95
# (Automatic) Curriculum Learning for RL

While most of the RL methods seen in this course work well in practice, there are some cases where using them alone fails. This can happen, for instance, when:

- the task to learn is hard and requires an **incremental acquisition of skills** (for instance, when one wants to make a bipedal agent learn to go through hard obstacles, it must first learn to stand, then walk, then maybe jump…)
- there are variations in the environment (that affect the difficulty) and one wants one's agent to be **robust** to them

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/bipedal.gif" alt="Bipedal"/>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/movable_creepers.gif" alt="Movable creepers"/>
<figcaption> <a href="https://developmentalsystems.org/TeachMyAgent/">TeachMyAgent</a> </figcaption>
</figure>

In such cases, it seems necessary to propose different tasks to our RL agent and organize them such that the agent progressively acquires skills. This approach is called **Curriculum Learning** and usually implies a hand-designed curriculum (or set of tasks organized in a specific order). In practice, one can, for instance, control the generation of the environment, the initial states, or use Self-Play and control the level of opponents proposed to the RL agent.

As designing such a curriculum is not always trivial, the field of **Automatic Curriculum Learning (ACL) proposes to design approaches that learn to create such an organization of tasks in order to maximize the RL agent's performance**. Portelas et al. proposed to define ACL as:

> … a family of mechanisms that automatically adapt the distribution of training data by learning to adjust the selection of learning situations to the capabilities of RL agents.

As an example, OpenAI used **Domain Randomization** (they applied random variations on the environment) to make a robot hand solve Rubik's Cubes.
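In code, the core of domain randomization can be as simple as re-sampling environment parameters at the start of every episode. A hypothetical sketch (not taken from OpenAI's or TeachMyAgent's actual code; the parameter names, ranges, and `env.reconfigure` API are invented for illustration):

```python
import random

def sample_env_params():
    # Randomize the physical properties the agent should become robust to
    return {
        "friction": random.uniform(0.5, 1.5),
        "object_mass": random.uniform(0.1, 0.5),
        "motor_noise": random.uniform(0.0, 0.05),
    }

for episode in range(n_episodes):          # n_episodes assumed defined elsewhere
    env.reconfigure(sample_env_params())   # hypothetical API: rebuild the env with new params
    state = env.reset()
    # ... collect a rollout and update the agent as usual ...
```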
<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/dr.jpg" alt="Dr"/>
<figcaption> <a href="https://openai.com/blog/solving-rubiks-cube/">OpenAI - Solving Rubik's Cube with a Robot Hand</a></figcaption>
</figure>

Finally, you can play with the robustness of agents trained in the <a href="https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo">TeachMyAgent</a> benchmark by controlling environment variations or even drawing the terrain 👇

<figure>
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit9/demo.png" alt="Demo"/>
<figcaption> <a href="https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo">https://huggingface.co/spaces/flowers-team/Interactive_DeepRL_Demo</a></figcaption>
</figure>

## Further reading

For more information, we recommend that you check out the following resources:

### Overview of the field

- [Automatic Curriculum Learning For Deep RL: A Short Survey](https://arxiv.org/pdf/2003.04664.pdf)
- [Curriculum for Reinforcement Learning](https://lilianweng.github.io/posts/2020-01-29-curriculum-rl/)

### Recent methods

- [Evolving Curricula with Regret-Based Environment Design](https://arxiv.org/abs/2203.01302)
- [Curriculum Reinforcement Learning via Constrained Optimal Transport](https://proceedings.mlr.press/v162/klink22a.html)
- [Prioritized Level Replay](https://arxiv.org/abs/2010.03934)

## Author

This section was written by <a href="https://twitter.com/ClementRomac"> Clément Romac </a>
deep-rl-class/units/en/unitbonus3/curriculum-learning.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/curriculum-learning.mdx", "repo_id": "deep-rl-class", "token_count": 1058 }
96
# Introduction

<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit13/thumbnail.png" alt="Unit bonus 4 thumbnail"/>

Welcome to this bonus unit, where you will **train a robot agent to complete a mini-game level using imitation learning.**

At the end of the unit, **you will have a trained agent capable of solving the level, as in the video**:

<video src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit13/onnx_inference_test.mp4" type="video/mp4" controls autoplay loop muted />

## Objectives:

- Learn how to use imitation learning with Godot RL Agents by training an agent to complete a mini-game environment using human-recorded expert demonstrations.

## Prerequisites and requirements:

- It is recommended that you complete the previous chapter ([Godot RL Agents](https://huggingface.co/learn/deep-rl-course/unitbonus3/godotrl)) before starting this tutorial,
- Some familiarity with Godot is recommended, although completing the tutorial does not require any gdscript coding knowledge,
- Godot with .NET support (tested to work with [4.3.dev5 .NET](https://godotengine.org/article/dev-snapshot-godot-4-3-dev-5/), may work with newer versions too),
- Godot RL Agents (you can use `pip install godot-rl` in the venv/conda env),
- [Imitation library](https://huggingface.co/learn/deep-rl-course/unitbonus5/train-our-robot),
- Time: ~1-2 hours to complete the project and training. It can be outside of this range depending on the hardware used.
deep-rl-class/units/en/unitbonus5/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus5/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 454 }
97
cff-version: 1.2.0
title: 'Diffusers: State-of-the-art diffusion models'
message: >-
  If you use this software, please cite it using the
  metadata from this file.
type: software
authors:
  - given-names: Patrick
    family-names: von Platen
  - given-names: Suraj
    family-names: Patil
  - given-names: Anton
    family-names: Lozhkov
  - given-names: Pedro
    family-names: Cuenca
  - given-names: Nathan
    family-names: Lambert
  - given-names: Kashif
    family-names: Rasul
  - given-names: Mishig
    family-names: Davaadorj
  - given-names: Dhruv
    family-names: Nair
  - given-names: Sayak
    family-names: Paul
  - given-names: Steven
    family-names: Liu
  - given-names: William
    family-names: Berman
  - given-names: Yiyi
    family-names: Xu
  - given-names: Thomas
    family-names: Wolf
repository-code: 'https://github.com/huggingface/diffusers'
abstract: >-
  Diffusers provides pretrained diffusion models across
  multiple modalities, such as vision and audio, and serves
  as a modular toolbox for inference and training of
  diffusion models.
keywords:
  - deep-learning
  - pytorch
  - image-generation
  - hacktoberfest
  - diffusion
  - text2image
  - image2image
  - score-based-generative-modeling
  - stable-diffusion
  - stable-diffusion-diffusers
license: Apache-2.0
version: 0.12.1
diffusers/CITATION.cff/0
{ "file_path": "diffusers/CITATION.cff", "repo_id": "diffusers", "token_count": 460 }
98