Columns: text (string, lengths 96 to 319k), id (string, lengths 14 to 178), metadata (dict)
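The header above only describes the record layout (text, id, metadata). As a minimal sketch of how records with this shape could be consumed, assuming the rows are stored as a JSON Lines file named `code_corpus.jsonl` (the file name and loading approach are assumptions, not part of the preview):

```python
# Hypothetical reader for records shaped like the rows below:
# {"text": "...", "id": "...", "metadata": {"file_path": ..., "repo_id": ..., "token_count": ...}}
import json

with open("code_corpus.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        text = record["text"]        # raw file contents (96 to ~319k characters)
        sample_id = record["id"]     # e.g. "transformers/tests/models/sew_d/test_modeling_sew_d.py/0"
        meta = record["metadata"]    # dict with file_path, repo_id, token_count
        print(sample_id, meta["repo_id"], meta["token_count"], len(text))
```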
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
transformers/tests/models/sew_d/test_modeling_sew_d.py/0
{ "file_path": "transformers/tests/models/sew_d/test_modeling_sew_d.py", "repo_id": "transformers", "token_count": 10675 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
transformers/tests/models/speecht5/test_modeling_speecht5.py/0
{ "file_path": "transformers/tests/models/speecht5/test_modeling_speecht5.py", "repo_id": "transformers", "token_count": 36221 }
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
transformers/tests/models/videomae/test_image_processing_videomae.py/0
{ "file_path": "transformers/tests/models/videomae/test_image_processing_videomae.py", "repo_id": "transformers", "token_count": 3761 }
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
transformers/tests/models/vitmatte/test_image_processing_vitmatte.py/0
{ "file_path": "transformers/tests/models/vitmatte/test_image_processing_vitmatte.py", "repo_id": "transformers", "token_count": 3654 }
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
transformers/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py/0
{ "file_path": "transformers/tests/models/wav2vec2/test_modeling_tf_wav2vec2.py", "repo_id": "transformers", "token_count": 17509 }
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless requir...
transformers/tests/models/xlm/test_tokenization_xlm.py/0
{ "file_path": "transformers/tests/models/xlm/test_tokenization_xlm.py", "repo_id": "transformers", "token_count": 1536 }
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless r...
transformers/tests/models/yolos/test_modeling_yolos.py/0
{ "file_path": "transformers/tests/models/yolos/test_modeling_yolos.py", "repo_id": "transformers", "token_count": 6807 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
transformers/tests/pipelines/test_pipelines_table_question_answering.py/0
{ "file_path": "transformers/tests/pipelines/test_pipelines_table_question_answering.py", "repo_id": "transformers", "token_count": 14916 }
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless requir...
transformers/tests/quantization/autoawq/test_awq.py/0
{ "file_path": "transformers/tests/quantization/autoawq/test_awq.py", "repo_id": "transformers", "token_count": 8896 }
# coding=utf-8 # Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless requir...
transformers/tests/quantization/ggml/test_ggml.py/0
{ "file_path": "transformers/tests/quantization/ggml/test_ggml.py", "repo_id": "transformers", "token_count": 17906 }
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
transformers/tests/repo_utils/test_get_test_info.py/0
{ "file_path": "transformers/tests/repo_utils/test_get_test_info.py", "repo_id": "transformers", "token_count": 2131 }
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or ag...
transformers/tests/test_feature_extraction_common.py/0
{ "file_path": "transformers/tests/test_feature_extraction_common.py", "repo_id": "transformers", "token_count": 828 }
# coding=utf-8 # Copyright 2018 the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
transformers/tests/trainer/test_trainer.py/0
{ "file_path": "transformers/tests/trainer/test_trainer.py", "repo_id": "transformers", "token_count": 110809 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
transformers/tests/utils/test_backbone_utils.py/0
{ "file_path": "transformers/tests/utils/test_backbone_utils.py", "repo_id": "transformers", "token_count": 5009 }
import os import unittest from pathlib import Path from transformers.utils.import_utils import define_import_structure, spread_import_structure import_structures = Path("import_structures") def fetch__all__(file_content): """ Returns the content of the __all__ variable in the file content. Returns None...
transformers/tests/utils/test_import_structure.py/0
{ "file_path": "transformers/tests/utils/test_import_structure.py", "repo_id": "transformers", "token_count": 2227 }
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/L...
transformers/utils/check_bad_commit.py/0
{ "file_path": "transformers/utils/check_bad_commit.py", "repo_id": "transformers", "token_count": 2747 }
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable...
transformers/utils/check_tf_ops.py/0
{ "file_path": "transformers/utils/check_tf_ops.py", "repo_id": "transformers", "token_count": 1302 }
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
transformers/utils/notification_service.py/0
{ "file_path": "transformers/utils/notification_service.py", "repo_id": "transformers", "token_count": 25355 }
#!/bin/bash # This script runs an SFT example end-to-end on a tiny model using different possible configurations # but defaults to QLoRA + PEFT OUTPUT_DIR="test_sft/" MODEL_NAME="trl-internal-testing/tiny-Qwen2ForCausalLM-2.5" DATASET_NAME="stanfordnlp/imdb" MAX_STEPS=5 BATCH_SIZE=2 SEQ_LEN=128 # Handle extra argumen...
trl/commands/run_sft.sh/0
{ "file_path": "trl/commands/run_sft.sh", "repo_id": "trl", "token_count": 626 }
# Detoxifying a Language Model using PPO Language models (LMs) are known to sometimes generate toxic outputs. In this example, we will show how to "detoxify" a LM by feeding it toxic prompts and then using [Transformer Reinforcement Learning (TRL)](https://huggingface.co/docs/trl/index) and Proximal Policy Optimizatio...
trl/docs/source/detoxifying_a_lm.md/0
{ "file_path": "trl/docs/source/detoxifying_a_lm.md", "repo_id": "trl", "token_count": 3789 }
# Use model after training Once you have trained a model using either the SFTTrainer, PPOTrainer, or DPOTrainer, you will have a fine-tuned model that can be used for text generation. In this section, we'll walk through the process of loading the fine-tuned model and generating text. If you need to run an inference se...
trl/docs/source/use_model.md/0
{ "file_path": "trl/docs/source/use_model.md", "repo_id": "trl", "token_count": 778 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/examples/datasets/rlaif-v.py/0
{ "file_path": "trl/examples/datasets/rlaif-v.py", "repo_id": "trl", "token_count": 1594 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py/0
{ "file_path": "trl/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py", "repo_id": "trl", "token_count": 3977 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/examples/scripts/dpo_vlm.py/0
{ "file_path": "trl/examples/scripts/dpo_vlm.py", "repo_id": "trl", "token_count": 1983 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/examples/scripts/xpo.py/0
{ "file_path": "trl/examples/scripts/xpo.py", "repo_id": "trl", "token_count": 1865 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/tests/test_bco_trainer.py/0
{ "file_path": "trl/tests/test_bco_trainer.py", "repo_id": "trl", "token_count": 8319 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/tests/test_iterative_sft_trainer.py/0
{ "file_path": "trl/tests/test_iterative_sft_trainer.py", "repo_id": "trl", "token_count": 2179 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/tests/test_utils.py/0
{ "file_path": "trl/tests/test_utils.py", "repo_id": "trl", "token_count": 9869 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/trl/models/auxiliary_modules.py/0
{ "file_path": "trl/trl/models/auxiliary_modules.py", "repo_id": "trl", "token_count": 1377 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/trl/trainer/alignprop_config.py/0
{ "file_path": "trl/trl/trainer/alignprop_config.py", "repo_id": "trl", "token_count": 3895 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/trl/trainer/judges.py/0
{ "file_path": "trl/trl/trainer/judges.py", "repo_id": "trl", "token_count": 7488 }
# Copyright 2025 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
trl/trl/trainer/rloo_config.py/0
{ "file_path": "trl/trl/trainer/rloo_config.py", "repo_id": "trl", "token_count": 1717 }
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
accelerate/benchmarks/fp8/ms_amp/ddp.py/0
{ "file_path": "accelerate/benchmarks/fp8/ms_amp/ddp.py", "repo_id": "accelerate", "token_count": 1938 }
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed...
accelerate/docs/source/concept_guides/performance.md/0
{ "file_path": "accelerate/docs/source/concept_guides/performance.md", "repo_id": "accelerate", "token_count": 1476 }
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed...
accelerate/docs/source/usage_guides/local_sgd.md/0
{ "file_path": "accelerate/docs/source/usage_guides/local_sgd.md", "repo_id": "accelerate", "token_count": 1475 }
# This config template simply setups up the TransformersEngine config (and a config for a single GPU), # this can interop with the other configs in this folder distributed_type: "NO" mixed_precision: "fp8" # Then we specify the fp8 configuration: fp8_config: backend: TE # Can be TE | MS-AMP # The following are TE s...
accelerate/examples/config_yaml_templates/fp8.yaml/0
{ "file_path": "accelerate/examples/config_yaml_templates/fp8.yaml", "repo_id": "accelerate", "token_count": 239 }
# Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by appl...
accelerate/examples/inference/distributed/phi2.py/0
{ "file_path": "accelerate/examples/inference/distributed/phi2.py", "repo_id": "accelerate", "token_count": 1161 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
accelerate/manim_animations/big_model_inference/stage_1.py/0
{ "file_path": "accelerate/manim_animations/big_model_inference/stage_1.py", "repo_id": "accelerate", "token_count": 1904 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
accelerate/src/accelerate/accelerator.py/0
{ "file_path": "accelerate/src/accelerate/accelerator.py", "repo_id": "accelerate", "token_count": 72627 }
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
accelerate/src/accelerate/test_utils/scripts/test_ddp_comm_hook.py/0
{ "file_path": "accelerate/src/accelerate/test_utils/scripts/test_ddp_comm_hook.py", "repo_id": "accelerate", "token_count": 1232 }
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
accelerate/src/accelerate/utils/fsdp_utils.py/0
{ "file_path": "accelerate/src/accelerate/utils/fsdp_utils.py", "repo_id": "accelerate", "token_count": 7632 }
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "bf16": { "enabled": "auto" }, "zero_optimization": { "stage": 2, "offload_optimizer":...
accelerate/tests/deepspeed/ds_config_zero2_model_only.json/0
{ "file_path": "accelerate/tests/deepspeed/ds_config_zero2_model_only.json", "repo_id": "accelerate", "token_count": 427 }
compute_environment: LOCAL_MACHINE deepspeed_config: {} distributed_type: 'NO' downcast_bf16: 'no' fsdp_config: {} gpu_ids: all machine_rank: 0 main_process_ip: null main_process_port: null main_training_function: main megatron_lm_config: {} mixed_precision: 'no' num_machines: 1 num_processes: 1 rdzv_backend: static sa...
accelerate/tests/test_configs/latest.yaml/0
{ "file_path": "accelerate/tests/test_configs/latest.yaml", "repo_id": "accelerate", "token_count": 186 }
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicabl...
accelerate/tests/test_sagemaker.py/0
{ "file_path": "accelerate/tests/test_sagemaker.py", "repo_id": "accelerate", "token_count": 1007 }
# Using the hub Install the [`hf-hub`](https://github.com/huggingface/hf-hub) crate: ```bash cargo add hf-hub ``` Then let's start by downloading the [model file](https://huggingface.co/bert-base-uncased/tree/main). ```rust # extern crate candle_core; # extern crate hf_hub; use hf_hub::api::sync::Api; use candle_c...
candle/candle-book/src/inference/hub.md/0
{ "file_path": "candle/candle-book/src/inference/hub.md", "repo_id": "candle", "token_count": 1098 }
pub(crate) mod affine; pub(crate) mod conv_transpose2d; pub(crate) mod matmul; pub(crate) mod qmatmul; pub(crate) mod random; pub(crate) mod reduce; pub(crate) mod unary; pub(crate) mod where_cond; use candle_core::{Device, Result}; pub(crate) trait BenchDevice { fn sync(&self) -> Result<()>; fn bench_name<S...
candle/candle-core/benches/benchmarks/mod.rs/0
{ "file_path": "candle/candle-core/benches/benchmarks/mod.rs", "repo_id": "candle", "token_count": 1064 }
#![allow(clippy::excessive_precision)] // Code taken from https://github.com/statrs-dev/statrs //! Provides the [error](https://en.wikipedia.org/wiki/Error_function) and //! related functions mod evaluate { //! Provides functions that don't have a numerical solution and must //! be solved computationally (e.g....
candle/candle-core/src/cpu/erf.rs/0
{ "file_path": "candle/candle-core/src/cpu/erf.rs", "repo_id": "candle", "token_count": 11974 }
//! Implementation of the Cuda backend when Cuda support has not been compiled in. //! #![allow(dead_code)] use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Error, Layout, Result, Shape}; #[derive(Debug, Clone)] pub struct CudaDevice; #[derive(Debug)] pub struct CudaStorage; macr...
candle/candle-core/src/dummy_cuda_backend.rs/0
{ "file_path": "candle/candle-core/src/dummy_cuda_backend.rs", "repo_id": "candle", "token_count": 3405 }
//! Support for the GGML file format. use super::{k_quants, GgmlDType, QStorage}; use crate::{Device, Result}; use byteorder::{LittleEndian, ReadBytesExt}; use std::collections::HashMap; // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.h#L37 #[derive(Debug, Clone, Copy, Pa...
candle/candle-core/src/quantized/ggml_file.rs/0
{ "file_path": "candle/candle-core/src/quantized/ggml_file.rs", "repo_id": "candle", "token_count": 4584 }
use crate::{shape::Dim, Context, Error, Result, Shape, Tensor}; impl Tensor { /// Concatenates two or more tensors along a particular dimension. /// /// All tensors must of the same rank, and the output will have /// the same rank /// /// ```rust /// # use candle_core::{Tensor, DType, Devic...
candle/candle-core/src/tensor_cat.rs/0
{ "file_path": "candle/candle-core/src/tensor_cat.rs", "repo_id": "candle", "token_count": 6380 }
use candle_core::{ bail, quantized::{self, GgmlDType}, test_device, test_utils::to_vec2_round, DType, Device, IndexOp, Module, Result, Tensor, }; use quantized::{k_quants, GgmlType}; use rand::prelude::*; const GGML_TEST_SIZE: usize = 32 * 128; const GGML_MAX_QUANTIZATION_TOTAL_ERROR: f32 = 0.002;...
candle/candle-core/tests/quantized_tests.rs/0
{ "file_path": "candle/candle-core/tests/quantized_tests.rs", "repo_id": "candle", "token_count": 21659 }
use candle::Tensor; pub struct Dataset { pub train_images: Tensor, pub train_labels: Tensor, pub test_images: Tensor, pub test_labels: Tensor, pub labels: usize, } pub mod cifar; pub mod mnist;
candle/candle-datasets/src/vision/mod.rs/0
{ "file_path": "candle/candle-datasets/src/vision/mod.rs", "repo_id": "candle", "token_count": 92 }
# candle-clip Contrastive Language-Image Pre-Training (CLIP) is an architecture trained on pairs of images with related texts. https://github.com/openai/CLIP https://github.com/huggingface/transformers/tree/f6fa0f0bf0796ac66f201f23bdb8585de1609add/src/transformers/models/clip ## Running on an example on cpu ``` $ ...
candle/candle-examples/examples/clip/README.md/0
{ "file_path": "candle/candle-examples/examples/clip/README.md", "repo_id": "candle", "token_count": 623 }
use enterpolation::linear::ConstEquidistantLinear; use enterpolation::Generator; use palette::LinSrgb; use candle::Tensor; pub struct SpectralRColormap { gradient: ConstEquidistantLinear<f32, LinSrgb, 9>, } impl SpectralRColormap { pub(crate) fn new() -> Self { // Define a colormap similar to 'Spectr...
candle/candle-examples/examples/depth_anything_v2/color_map.rs/0
{ "file_path": "candle/candle-examples/examples/depth_anything_v2/color_map.rs", "repo_id": "candle", "token_count": 896 }
//! EVA-02: Explore the limits of Visual representation at scAle //! https://github.com/baaivision/EVA #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use clap::Parser; use candle::{DType, Device, IndexOp, Result, Tensor, D}; use candle_nn::{Module, Va...
candle/candle-examples/examples/eva2/main.rs/0
{ "file_path": "candle/candle-examples/examples/eva2/main.rs", "repo_id": "candle", "token_count": 1221 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use clap::Parser; use candle_transformers::models::qwen2::{Config, Model}; use candle::{DType, Tensor}; use candle_nn::VarBuilder; use hf_hub::{api::sync::Api, Repo, Repo...
candle/candle-examples/examples/gte-qwen/main.rs/0
{ "file_path": "candle/candle-examples/examples/gte-qwen/main.rs", "repo_id": "candle", "token_count": 2613 }
# candle-llava LLaVA (Large Language-and-Vision Assistant) is an end-to-end trained large multimodal model. This example is from [candle-llava](https://github.com/chenwanqq/candle-llava) The code is based on [https://github.com/haotian-liu/LLaVA](https://github.com/haotian-liu/LLaVA), Hence the llava-hf version of co...
candle/candle-examples/examples/llava/readme.md/0
{ "file_path": "candle/candle-examples/examples/llava/readme.md", "repo_id": "candle", "token_count": 671 }
# candle-mixtral: 8x7b LLM using a sparse mixture of experts. Mixtral-8x7B-v0.1 is a pretrained generative LLM with 56 billion parameters. - [Blog post](https://mistral.ai/news/mixtral-of-experts/) from Mistral announcing the model release. - [Model card](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1) on the Hu...
candle/candle-examples/examples/mixtral/README.md/0
{ "file_path": "candle/candle-examples/examples/mixtral/README.md", "repo_id": "candle", "token_count": 322 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::{Error as E, Result}; use candle::{DType, IndexOp, Shape, Tensor, D}; use candle_nn::VarBuilder; use candle_transformers::models::nvembed_v2::model::Model; use clap::Parser; use hf_hub::{api::sy...
candle/candle-examples/examples/nvembed_v2/main.rs/0
{ "file_path": "candle/candle-examples/examples/nvembed_v2/main.rs", "repo_id": "candle", "token_count": 3339 }
# candle-quantized-qwen2-instruct [Qwen2]((https://qwenlm.github.io/blog/qwen2/)) is an upgraded version of Qwen1.5, released by Alibaba Cloud. ## Running the example ```bash cargo run --example quantized-qwen2-instruct --release -- --prompt "Write a function to count prime numbers up to N." ``` 0.5b, 1.5b, 7b and ...
candle/candle-examples/examples/quantized-qwen2-instruct/README.md/0
{ "file_path": "candle/candle-examples/examples/quantized-qwen2-instruct/README.md", "repo_id": "candle", "token_count": 129 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::Result; use clap::{Parser, Subcommand}; mod gym_env; mod vec_gym_env; mod ddpg; mod dqn; mod policy_gradient; #[derive(Parser)] struct Args { #[command(subcommand)] command: Command, ...
candle/candle-examples/examples/reinforcement-learning/main.rs/0
{ "file_path": "candle/candle-examples/examples/reinforcement-learning/main.rs", "repo_id": "candle", "token_count": 277 }
# candle-stable-diffusion: A Diffusers API in Rust/Candle ![rusty robot holding a candle](./assets/stable-diffusion-xl.jpg) _A rusty robot holding a fire torch in its hand_, generated by Stable Diffusion XL using Rust and [candle](https://github.com/huggingface/candle). The `stable-diffusion` example is a conversion...
candle/candle-examples/examples/stable-diffusion/README.md/0
{ "file_path": "candle/candle-examples/examples/stable-diffusion/README.md", "repo_id": "candle", "token_count": 935 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{DType, IndexOp, D}; use candle_nn::{ModuleT, VarBuilder}; use candle_transformers::models::vgg::{Models, Vgg}; use clap::{Parser, ValueEnum}; #[derive(Clone, Copy, Debug, ValueEnum)] enum Whic...
candle/candle-examples/examples/vgg/main.rs/0
{ "file_path": "candle/candle-examples/examples/vgg/main.rs", "repo_id": "candle", "token_count": 967 }
use std::path::PathBuf; use anyhow::{Error as E, Result}; use candle::{Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::models::xlm_roberta::{ Config, XLMRobertaForMaskedLM, XLMRobertaForSequenceClassification, }; use clap::{Parser, ValueEnum}; use hf_hub::{api::sync::Api, Repo, RepoType}; use ...
candle/candle-examples/examples/xlm-roberta/main.rs/0
{ "file_path": "candle/candle-examples/examples/xlm-roberta/main.rs", "repo_id": "candle", "token_count": 4653 }
/****************************************************************************** * Copyright (c) 2024, Tri Dao. ******************************************************************************/ #pragma once #include <tuple> #include <cstdio> #if !defined(__CUDACC_RTC__) #include "cuda_runtime.h" #endif #define CHECK...
candle/candle-flash-attn/kernels/hardware_info.h/0
{ "file_path": "candle/candle-flash-attn/kernels/hardware_info.h", "repo_id": "candle", "token_count": 854 }
fn main() { println!("cargo:rerun-if-changed=build.rs"); println!("cargo:rerun-if-changed=src/compatibility.cuh"); println!("cargo:rerun-if-changed=src/cuda_utils.cuh"); println!("cargo:rerun-if-changed=src/binary_op_macros.cuh"); let builder = bindgen_cuda::Builder::default(); println!("cargo:...
candle/candle-kernels/build.rs/0
{ "file_path": "candle/candle-kernels/build.rs", "repo_id": "candle", "token_count": 177 }
[package] name = "candle-metal-kernels" version = "0.8.2" edition = "2021" description = "Metal kernels for Candle" repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [dependencies] metal = { version = "0.27.0"...
candle/candle-metal-kernels/Cargo.toml/0
{ "file_path": "candle/candle-metal-kernels/Cargo.toml", "repo_id": "candle", "token_count": 259 }
// Updated from MLX commit has f70764a #include <metal_stdlib> #include <metal_simdgroup> using namespace metal; // ============ "mlx/backend/metal/kernels/scaled_dot_product_attention_params.h" struct MLXFastAttentionParams { const int M; const int N; const int K; const int ldq; // ldq == ldo const int ...
candle/candle-metal-kernels/src/scaled_dot_product_attention.metal/0
{ "file_path": "candle/candle-metal-kernels/src/scaled_dot_product_attention.metal", "repo_id": "candle", "token_count": 21797 }
use crate::benchmarks::{BenchDevice, BenchDeviceHandler}; use candle::{DType, Device, Module, Tensor}; use candle_nn::LayerNorm; use criterion::{black_box, criterion_group, Criterion}; use std::time::Instant; fn run(input: &Tensor, weight: &Tensor, bias: &Tensor) { let _ = LayerNorm::new(weight.clone(), bias.clone...
candle/candle-nn/benches/benchmarks/layer_norm.rs/0
{ "file_path": "candle/candle-nn/benches/benchmarks/layer_norm.rs", "repo_id": "candle", "token_count": 676 }
//! Linear layer //! //! This layer applies a linear transformation to the incoming data, `y = x@w.t() + b`. //! The bias is optional. The `forward` method can be used to apply the layer, it supports input //! with a batch dimension (so of shape `(b_sz, in_c)`) or without (of shape `(in_c,)`), the //! output has shape ...
candle/candle-nn/src/linear.rs/0
{ "file_path": "candle/candle-nn/src/linear.rs", "repo_id": "candle", "token_count": 1252 }
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::test_utils::{to_vec0_round, to_vec2_round}; use anyhow::Result; use candle::{DType, Device, Tensor, Var}; use candle_nn::{AdamW, Linear, Module, Optimizer, ParamsAdamW, SGD}; #[test] fn sgd_op...
candle/candle-nn/tests/optim.rs/0
{ "file_path": "candle/candle-nn/tests/optim.rs", "repo_id": "candle", "token_count": 2568 }
from candle.utils import load_safetensors, save_gguf, load_gguf from candle.models.bert import BertModel, Config import json from candle import Tensor from tqdm import tqdm from dataclasses import fields import os import time from huggingface_hub import hf_hub_download from transformers import BertTokenizer, AutoModel...
candle/candle-pyo3/e5.py/0
{ "file_path": "candle/candle-pyo3/e5.py", "repo_id": "candle", "token_count": 1778 }
import candle from candle import Tensor _UNSIGNED_DTYPES = set([str(candle.u8), str(candle.u32)]) def _assert_tensor_metadata( actual: Tensor, expected: Tensor, check_device: bool = True, check_dtype: bool = True, check_layout: bool = True, check_stride: bool = False, ): if check_device:...
candle/candle-pyo3/py_src/candle/testing/__init__.py/0
{ "file_path": "candle/candle-pyo3/py_src/candle/testing/__init__.py", "repo_id": "candle", "token_count": 854 }
import candle from candle import Tensor from candle.testing import assert_equal, assert_almost_equal import pytest @pytest.mark.parametrize("dtype", [candle.f32, candle.f64, candle.f16, candle.u32, candle.u8, candle.i64]) def test_assert_equal_asserts_correctly(dtype: candle.DType): a = Tensor([1, 2, 3]).to(dtype...
candle/candle-pyo3/tests/bindings/test_testing.py/0
{ "file_path": "candle/candle-pyo3/tests/bindings/test_testing.py", "repo_id": "candle", "token_count": 476 }
//! Chinese contrastive Language-Image Pre-Training //! //! Chinese contrastive Language-Image Pre-Training (CLIP) is an architecture trained on //! pairs of images with related texts. //! //! - 💻 [Chinese-CLIP](https://github.com/OFA-Sys/Chinese-CLIP) //! - 💻 [HF](https://github.com/huggingface/transformers/blob/5af...
candle/candle-transformers/src/models/chinese_clip/text_model.rs/0
{ "file_path": "candle/candle-transformers/src/models/chinese_clip/text_model.rs", "repo_id": "candle", "token_count": 8950 }
//! EfficientViT (MSRA) inference implementation based on timm. //! //! This crate provides an implementation of the EfficientViT model from Microsoft Research Asia //! for efficient image classification. The model uses cascaded group attention modules //! to achieve strong performance while maintaining low memory usag...
candle/candle-transformers/src/models/efficientvit.rs/0
{ "file_path": "candle/candle-transformers/src/models/efficientvit.rs", "repo_id": "candle", "token_count": 7414 }
//! # JinaBERT inference implementation //! //! Based on implementation from huggingface for Jina BERT and its variants //! //! See: [Jina Embeddings on HuggingFace](https://huggingface.co/jinaai/jina-embeddings-v2-base-en) use super::with_tracing::{linear, linear_no_bias, Embedding, Linear}; use candle::{DType, Devic...
candle/candle-transformers/src/models/jina_bert.rs/0
{ "file_path": "candle/candle-transformers/src/models/jina_bert.rs", "repo_id": "candle", "token_count": 6364 }
//! NV-Embed-v2 //! //! NV-Embed-v2 is a text embedding model that combines a Mistral decoder with a latent attention mechanism to produce high-quality text embeddings. //! //! This implementation is based on the [paper](https://arxiv.org/pdf/2405.17428) and [weights](https://huggingface.co/nvidia/NV-Embed-v2) //! //! ...
candle/candle-transformers/src/models/nvembed_v2/mod.rs/0
{ "file_path": "candle/candle-transformers/src/models/nvembed_v2/mod.rs", "repo_id": "candle", "token_count": 211 }
//! Quantized Llama2 model implementation. //! //! This provides an 8-bit quantized implementation of Meta's LLaMA2 language model //! for reduced memory usage and faster inference. //! //! Key characteristics: //! - Decoder-only transformer architecture //! - RoPE position embeddings //! - Grouped Query Attention //! ...
candle/candle-transformers/src/models/quantized_llama2_c.rs/0
{ "file_path": "candle/candle-transformers/src/models/quantized_llama2_c.rs", "repo_id": "candle", "token_count": 4607 }
//! Recurrent Gemma model implementation //! //! Recurrent Gemma is a version of the Gemma language model that incorporates recurrent memory. //! This allows the model to maintain state between predictions and have longer-range memory. //! //! Key characteristics: //! - Real-gated linear recurrent units (RGLRU) //! - 1...
candle/candle-transformers/src/models/recurrent_gemma.rs/0
{ "file_path": "candle/candle-transformers/src/models/recurrent_gemma.rs", "repo_id": "candle", "token_count": 12053 }
//! # Denoising Diffusion Implicit Models //! //! The Denoising Diffusion Implicit Models (DDIM) is a simple scheduler //! similar to Denoising Diffusion Probabilistic Models (DDPM). The DDPM //! generative process is the reverse of a Markovian process, DDIM generalizes //! this to non-Markovian guidance. //! //! Denoi...
candle/candle-transformers/src/models/stable_diffusion/ddim.rs/0
{ "file_path": "candle/candle-transformers/src/models/stable_diffusion/ddim.rs", "repo_id": "candle", "token_count": 3954 }
//! TrOCR model implementation. //! //! TrOCR is a Transformer-based OCR model that uses a Vision Transformer encoder //! and a BART-like decoder for optical character recognition. //! //! Key characteristics: //! - Vision Transformer encoder for image processing //! - BART-style decoder for text generation //! - Learn...
candle/candle-transformers/src/models/trocr.rs/0
{ "file_path": "candle/candle-transformers/src/models/trocr.rs", "repo_id": "candle", "token_count": 8631 }
//! Yi model implementation. //! //! This candle implementation uses a pre-trained Yi decoder-only large language model for inference. //! The model was trained by 01.AI and follows a standard transformer architecture similar to LLaMA. //! //! Original code: //! - 💻 [Yi Model](https://huggingface.co/01-ai/Yi-6B) //! -...
candle/candle-transformers/src/models/yi.rs/0
{ "file_path": "candle/candle-transformers/src/models/yi.rs", "repo_id": "candle", "token_count": 6426 }
export async function getEmbeddings( worker, weightsURL, tokenizerURL, configURL, modelID, sentences, updateStatus = null ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, sentences, }); function mes...
candle/candle-wasm-examples/bert/utils.js/0
{ "file_path": "candle/candle-wasm-examples/bert/utils.js", "repo_id": "candle", "token_count": 1250 }
image: repository: ghcr.io/huggingface name: chat-ui tag: 0.0.0-latest pullPolicy: IfNotPresent replicas: 3 domain: huggingface.co networkPolicy: enabled: false allowedBlocks: [] service: type: NodePort annotations: { } serviceAccount: enabled: false create: false name: "" automountServiceA...
chat-ui/chart/values.yaml/0
{ "file_path": "chat-ui/chart/values.yaml", "repo_id": "chat-ui", "token_count": 392 }
# Text Generation Inference (TGI) | Feature | Available | | --------------------------- | --------- | | [Tools](../tools) | Yes\* | | [Multimodal](../multimodal) | Yes\* | \* Tools are only supported with the Cohere Command R+ model with the Xenova tokenizers. Please see the [Too...
chat-ui/docs/source/configuration/models/providers/tgi.md/0
{ "file_path": "chat-ui/docs/source/configuration/models/providers/tgi.md", "repo_id": "chat-ui", "token_count": 1063 }
<script lang="ts"> import { onDestroy } from "svelte"; import IconCopy from "./icons/IconCopy.svelte"; import Tooltip from "./Tooltip.svelte"; interface Props { classNames?: string; value: string; children?: import("svelte").Snippet; onClick?: () => void; } let { classNames = "", value, children, onCli...
chat-ui/src/lib/components/CopyToClipBoardBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/CopyToClipBoardBtn.svelte", "repo_id": "chat-ui", "token_count": 620 }
<script lang="ts"> import { run } from "svelte/legacy"; import { fade } from "svelte/transition"; import { onDestroy } from "svelte"; import IconChevron from "./icons/IconChevron.svelte"; let visible = $state(false); interface Props { scrollNode: HTMLElement; class?: string; } let { scrollNode, class: cl...
chat-ui/src/lib/components/ScrollToBottomBtn.svelte/0
{ "file_path": "chat-ui/src/lib/components/ScrollToBottomBtn.svelte", "repo_id": "chat-ui", "token_count": 518 }
<script lang="ts"> import { env as envPublic } from "$env/dynamic/public"; import Logo from "$lib/components/icons/Logo.svelte"; import { createEventDispatcher } from "svelte"; import IconGear from "~icons/bi/gear-fill"; import AnnouncementBanner from "../AnnouncementBanner.svelte"; import type { Model } from "$l...
chat-ui/src/lib/components/chat/ChatIntroduction.svelte/0
{ "file_path": "chat-ui/src/lib/components/chat/ChatIntroduction.svelte", "repo_id": "chat-ui", "token_count": 1483 }
<script lang="ts"> interface Props { classNames?: string; } let { classNames = "" }: Props = $props(); </script> <svg xmlns="http://www.w3.org/2000/svg" class={classNames} width="1em" height="1em" fill="none" viewBox="0 0 32 32" ><path fill="currentColor" fill-rule="evenodd" d="M3.143 20.286h4.286v2...
chat-ui/src/lib/components/icons/IconNew.svelte/0
{ "file_path": "chat-ui/src/lib/components/icons/IconNew.svelte", "repo_id": "chat-ui", "token_count": 451 }
import type { Migration } from "."; import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { logger } from "$lib/server/logger"; const addToolsToSettings: Migration = { _id: new ObjectId("5c9c4c4c4c4c4c4c4c4c4c4c"), name: "Add empty 'tools' record in settings", up: async () =...
chat-ui/src/lib/migrations/routines/03-add-tools-in-settings.ts/0
{ "file_path": "chat-ui/src/lib/migrations/routines/03-add-tools-in-settings.ts", "repo_id": "chat-ui", "token_count": 272 }
import { z } from "zod"; import type { EmbeddingEndpoint } from "../embeddingEndpoints"; import type { Tensor, FeatureExtractionPipeline } from "@huggingface/transformers"; import { pipeline } from "@huggingface/transformers"; export const embeddingEndpointTransformersJSParametersSchema = z.object({ weight: z.number(...
chat-ui/src/lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints.ts/0
{ "file_path": "chat-ui/src/lib/server/embeddingEndpoints/transformersjs/embeddingEndpoints.ts", "repo_id": "chat-ui", "token_count": 542 }
import { buildPrompt } from "$lib/buildPrompt"; import type { TextGenerationStreamOutput } from "@huggingface/inference"; import type { Endpoint } from "../endpoints"; import { z } from "zod"; export const endpointOllamaParametersSchema = z.object({ weight: z.number().int().positive().default(1), model: z.any(), ty...
chat-ui/src/lib/server/endpoints/ollama/endpointOllama.ts/0
{ "file_path": "chat-ui/src/lib/server/endpoints/ollama/endpointOllama.ts", "repo_id": "chat-ui", "token_count": 1380 }
import type { ProcessedModel } from "../models"; import type { Endpoint } from "../endpoints/endpoints"; import type { Conversation } from "$lib/types/Conversation"; import type { Message } from "$lib/types/Message"; import type { Assistant } from "$lib/types/Assistant"; export interface TextGenerationContext { model...
chat-ui/src/lib/server/textGeneration/types.ts/0
{ "file_path": "chat-ui/src/lib/server/textGeneration/types.ts", "repo_id": "chat-ui", "token_count": 190 }
/** Remove excess whitespace and newlines */ export const sanitizeString = (str: string) => str .split("\n") .map((s) => s.trim()) .filter(Boolean) .join("\n") .replaceAll(/ +/g, " "); /** Collapses a string into a single line */ export const collapseString = (str: string) => sanitizeString(str.replaceAll(/...
chat-ui/src/lib/server/websearch/markdown/utils/nlp.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/markdown/utils/nlp.ts", "repo_id": "chat-ui", "token_count": 126 }
import type { Message } from "$lib/types/Message"; import { format } from "date-fns"; import type { EndpointMessage } from "../../endpoints/endpoints"; import { generateFromDefaultEndpoint } from "../../generateFromDefaultEndpoint"; import { getReturnFromGenerator } from "$lib/utils/getReturnFromGenerator"; export asy...
chat-ui/src/lib/server/websearch/search/generateQuery.ts/0
{ "file_path": "chat-ui/src/lib/server/websearch/search/generateQuery.ts", "repo_id": "chat-ui", "token_count": 803 }
import type { ObjectId } from "mongodb"; import type { Message } from "./Message"; import type { Timestamps } from "./Timestamps"; import type { User } from "./User"; import type { Assistant } from "./Assistant"; export interface Conversation extends Timestamps { _id: ObjectId; sessionId?: string; userId?: User["_...
chat-ui/src/lib/types/Conversation.ts/0
{ "file_path": "chat-ui/src/lib/types/Conversation.ts", "repo_id": "chat-ui", "token_count": 182 }
import type { ObjectId } from "mongodb"; import type { User } from "./User"; import type { Timestamps } from "./Timestamps"; import type { BackendToolContext } from "$lib/server/tools"; import type { MessageUpdate } from "./MessageUpdate"; import { z } from "zod"; import type { ReviewStatus } from "./Review"; export c...
chat-ui/src/lib/types/Tool.ts/0
{ "file_path": "chat-ui/src/lib/types/Tool.ts", "repo_id": "chat-ui", "token_count": 1451 }
<script lang="ts"> import { goto } from "$app/navigation"; import { base } from "$app/paths"; import { page } from "$app/state"; import { env as envPublic } from "$env/dynamic/public"; import ChatWindow from "$lib/components/chat/ChatWindow.svelte"; import { ERROR_MESSAGES, error } from "$lib/stores/errors"; imp...
chat-ui/src/routes/+page.svelte/0
{ "file_path": "chat-ui/src/routes/+page.svelte", "repo_id": "chat-ui", "token_count": 1058 }
<script lang="ts"> import logo from "../../../../../static/huggingchat/logo.svg?raw"; interface Props { name: string; description?: string; createdByName: string | undefined; avatar: string | undefined; } let { name, description = "", createdByName, avatar }: Props = $props(); </script> <div class="flex h...
chat-ui/src/routes/assistant/[assistantId]/thumbnail.png/ChatThumbnail.svelte/0
{ "file_path": "chat-ui/src/routes/assistant/[assistantId]/thumbnail.png/ChatThumbnail.svelte", "repo_id": "chat-ui", "token_count": 573 }