code
stringlengths
66
870k
docstring
stringlengths
19
26.7k
func_name
stringlengths
1
138
language
stringclasses
1 value
repo
stringlengths
7
68
path
stringlengths
5
324
url
stringlengths
46
389
license
stringclasses
7 values
def require_torch_gpu(test_case):
    """Decorator marking a test that requires CUDA and PyTorch."""
    # Guard-clause form: pass the test through untouched when CUDA is the
    # active device, otherwise wrap it with a skip marker.
    if torch_device == "cuda":
        return test_case
    return unittest.skip("test requires CUDA")(test_case)
Decorator marking a test that requires CUDA and PyTorch.
require_torch_gpu
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def require_deepspeed(test_case):
    """Decorator marking a test that requires deepspeed."""
    # Early-return when the dependency is present; only build the skip
    # decorator when it is actually needed.
    if is_deepspeed_available():
        return test_case
    return unittest.skip("test requires deepspeed")(test_case)
Decorator marking a test that requires deepspeed
require_deepspeed
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def require_bnb(test_case):
    """Decorator marking a test that requires bitsandbytes."""
    # Same guard-clause shape as the sibling `require_*` decorators.
    if is_bnb_available():
        return test_case
    return unittest.skip("test requires bitsandbytes from https://github.com/facebookresearch/bitsandbytes")(
        test_case
    )
Decorator marking a test that requires bitsandbytes
require_bnb
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def require_bnb_non_decorator():
    """Non-decorator helper that skips the current test if bitsandbytes is missing."""
    # Raising SkipTest from inside a test body aborts that test as "skipped"
    # rather than failed; nothing happens when the dependency is available.
    if is_bnb_available():
        return
    raise SkipTest("Test requires bitsandbytes from https://github.com/facebookresearch/bitsandbytes")
Non-Decorator function that would skip a test if bitsandbytes is missing
require_bnb_non_decorator
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def set_seed(seed: int = 42): """ Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` Args: seed (:obj:`int`): The seed to set. """ random.seed(seed) np.random.seed(seed) if is_torch_available(): torch.manual_seed(seed) torch...
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` Args: seed (:obj:`int`): The seed to set.
set_seed
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def get_gpu_count():
    """Return the number of available GPUs (regardless of whether torch or tf is used)."""
    # Without torch there is no way to query CUDA devices here, so report 0.
    if not is_torch_available():
        return 0
    # Local import keeps torch optional for environments that don't have it.
    import torch

    return torch.cuda.device_count()
Return the number of available gpus (regardless of whether torch or tf is used)
get_gpu_count
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def torch_assert_equal(actual, expected, **kwargs):
    """Compare two tensors or non-tensor numbers for exact equality."""
    # assert_close (added around pt-1.9) gives richer diagnostics than plain
    # equality — e.g. it reports shape/dtype mismatches. Zero tolerances turn
    # its closeness check into a strict equality check.
    strict = dict(rtol=0.0, atol=0.0)
    return torch.testing.assert_close(actual, expected, **strict, **kwargs)
compare two tensors or non-tensor numbers for their equality
torch_assert_equal
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def torch_assert_close(actual, expected, **kwargs):
    """Compare two tensors or non-tensor numbers for their closeness."""
    # Thin wrapper over assert_close (added around pt-1.9; it also validates
    # dimensions match) so callers don't touch torch.testing directly.
    check = torch.testing.assert_close
    return check(actual, expected, **kwargs)
compare two tensors or non-tensor numbers for their closeness.
torch_assert_close
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def require_torch_bf16(test_case):
    """Decorator marking a test that requires CUDA hardware supporting bf16 and PyTorch >= 1.9."""
    # Mirror the other `require_*` decorators: pass through when supported,
    # otherwise attach a skip marker.
    if is_torch_bf16_available():
        return test_case
    return unittest.skip("test requires CUDA hardware supporting bf16 and PyTorch >= 1.9")(test_case)
Decorator marking a test that requires CUDA hardware supporting bf16 and PyTorch >= 1.9.
require_torch_bf16
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def get_tests_dir(append_path=None): """ Args: append_path: optional path to append to the tests dir path Return: The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is joined after the `tests` dir the former is provided. "...
Args: append_path: optional path to append to the tests dir path Return: The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is joined after the `tests` dir the former is provided.
get_tests_dir
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def ExtendSysPath(path: Union[str, os.PathLike]) -> Iterator[None]: """ Temporary add given path to `sys.path`. Usage :: with ExtendSysPath('/path/to/dir'): mymodule = importlib.import_module('mymodule') """ path = os.fspath(path) try: sys.path.insert(0, path) ...
Temporary add given path to `sys.path`. Usage :: with ExtendSysPath('/path/to/dir'): mymodule = importlib.import_module('mymodule')
ExtendSysPath
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def get_env(self): """ Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly. This is useful for invoking external programs from the test suite - e.g. distributed training. It always inserts ``.`` first, then ``./tests`` depending on the test suite type and ...
Return a copy of the ``os.environ`` object that sets up ``PYTHONPATH`` correctly. This is useful for invoking external programs from the test suite - e.g. distributed training. It always inserts ``.`` first, then ``./tests`` depending on the test suite type and finally the preset ``PYT...
get_env
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def get_auto_remove_tmp_dir(self, tmp_dir=None, before=None, after=None): """ Args: tmp_dir (:obj:`string`, `optional`): if :obj:`None`: - a unique temporary path will be created - sets ``before=True`` if ``before`` is :obj:`None` ...
Args: tmp_dir (:obj:`string`, `optional`): if :obj:`None`: - a unique temporary path will be created - sets ``before=True`` if ``before`` is :obj:`None` - sets ``after=True`` if ``after`` is :obj:`None` else: ...
get_auto_remove_tmp_dir
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def mockenv_context(*remove, **update): """ Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations. Args: remove: Environment variables to remove. update: Di...
Temporarily updates the ``os.environ`` dictionary in-place. Similar to mockenv The ``os.environ`` dictionary is updated in-place so that the modification is sure to work in all situations. Args: remove: Environment variables to remove. update: Dictionary of environment variables and values to...
mockenv_context
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def get_xdist_worker_id():
    """When run under pytest-xdist return the worker id (int), otherwise return 0."""
    # pytest-xdist exposes the worker name as e.g. "gw3"; outside xdist the
    # variable is unset, so default to "gw0" which maps to worker 0.
    worker_name = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    return int(worker_name[2:])  # drop the leading "gw"
when run under pytest-xdist returns the worker id (int), otherwise returns 0
get_xdist_worker_id
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def pytest_addoption_shared(parser): """ This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there. It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest` option. """ option = "--make-repor...
This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there. It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest` option.
pytest_addoption_shared
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def pytest_terminal_summary_main(tr, id): """ Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current directory. The report files are prefixed with the test suite name. This function emulates --duration and -rA pytest arguments. This function ...
Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current directory. The report files are prefixed with the test suite name. This function emulates --duration and -rA pytest arguments. This function is to be called from `conftest.py` via `pytest_te...
pytest_terminal_summary_main
python
huggingface/smollm
vision/m4/testing_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/testing_utils.py
Apache-2.0
def _compute_relaxed_vqa_accuracy(self, generated_texts_unique, answers_unique, normalize_text_fn): """ From https://aclanthology.org/2022.findings-acl.177.pdf We use a relaxed accuracy measure for the numeric answers to allow a minor inaccuracy that may result from the automatic data extraction...
From https://aclanthology.org/2022.findings-acl.177.pdf We use a relaxed accuracy measure for the numeric answers to allow a minor inaccuracy that may result from the automatic data extraction process. We consider an answer to be correct if it is within 5% of the gold answer. For non-numeric answers, w...
_compute_relaxed_vqa_accuracy
python
huggingface/smollm
vision/m4/evaluation/custom_metrics/open_ended_vqa_metrics.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/custom_metrics/open_ended_vqa_metrics.py
Apache-2.0
def vqa_normalize_text(text: str) -> str: """Process a text Source: https://github.com/GT-Vision-Lab/VQA/blob/master/PythonEvaluationTools/vqaEvaluation/vqaEval.py 1. Conversion of characters to lower case 2. Replace breaking lines and tabulations by a white space 3. Replace punctuations by a white ...
Process a text Source: https://github.com/GT-Vision-Lab/VQA/blob/master/PythonEvaluationTools/vqaEvaluation/vqaEval.py 1. Conversion of characters to lower case 2. Replace breaking lines and tabulations by a white space 3. Replace punctuations by a white space 4. Conversion of numbers written in let...
vqa_normalize_text
python
huggingface/smollm
vision/m4/evaluation/custom_metrics/utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/custom_metrics/utils.py
Apache-2.0
def check_is_number(string):
    """Check if the given string is a number (per `convert_to_number`'s parsing rules)."""
    # EAFP: attempt the conversion and interpret a ValueError as "not a number".
    try:
        convert_to_number(string)
    except ValueError:
        return False
    return True
Check if the given string is a number
check_is_number
python
huggingface/smollm
vision/m4/evaluation/custom_metrics/utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/custom_metrics/utils.py
Apache-2.0
def normalize_str_mmmu(string): """ Normalize the str to lower case and make them float numbers if possible. """ # check if characters in the string # if number, numerize it. string = string.strip() if string.startswith("Answer: "): string = string.replace("Answer: ", "") is_nu...
Normalize the str to lower case and make them float numbers if possible.
normalize_str_mmmu
python
huggingface/smollm
vision/m4/evaluation/custom_metrics/utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/custom_metrics/utils.py
Apache-2.0
def extract_numbers_mmmu(string): """ Exact all forms of numbers from a string with regex. """ # Pattern for numbers with commas pattern_commas = r"-?\b\d{1,3}(?:,\d{3})+\b" # Pattern for scientific notation pattern_scientific = r"-?\d+(?:\.\d+)?[eE][+-]?\d+" # Pattern for simple numbers...
Exact all forms of numbers from a string with regex.
extract_numbers_mmmu
python
huggingface/smollm
vision/m4/evaluation/custom_metrics/utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/custom_metrics/utils.py
Apache-2.0
def parse_open_response_mmmu(response, normalize_text_fn): """ Parse the prediction from the generated response. Return a list of predicted strings or numbers """ def get_key_subresponses(response): key_responses = [] response = response.strip().strip(".").lower() sub_respon...
Parse the prediction from the generated response. Return a list of predicted strings or numbers
parse_open_response_mmmu
python
huggingface/smollm
vision/m4/evaluation/custom_metrics/utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/custom_metrics/utils.py
Apache-2.0
def _split_to_single_caption(caption): """This function is mainly used in Localized Narratives where a paragraph can contain multiple relevant captions to a single image. We split the paragraph into multiple captions and then return each as an individual sample. """ extended = [] captions = capt...
This function is mainly used in Localized Narratives where a paragraph can contain multiple relevant captions to a single image. We split the paragraph into multiple captions and then return each as an individual sample.
_split_to_single_caption
python
huggingface/smollm
vision/m4/evaluation/scripts/create_sample_evaluation_datasets_simplified.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/scripts/create_sample_evaluation_datasets_simplified.py
Apache-2.0
def fetch_training_run(training_run_name): """ Fetch training run. There can only be one corresponding training run. If not, double check the tags (killed, failed, etc.) """ matching_runs = [] runs = api.runs(f"{args.wandb_entity}/{args.wandb_training_project}") ...
Fetch training run. There can only be one corresponding training run. If not, double check the tags (killed, failed, etc.)
fetch_training_run
python
huggingface/smollm
vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
Apache-2.0
def fetch_evaluation_run(evaluation_run_name): """ Fetch evaluation run. There can only be one corresponding evaluation run at most. If not, double check the tags (killed, failed, etc.) """ matching_runs = [] runs = api.runs(f"{args.wandb_entity}/{args.wandb_eval_project...
Fetch evaluation run. There can only be one corresponding evaluation run at most. If not, double check the tags (killed, failed, etc.)
fetch_evaluation_run
python
huggingface/smollm
vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
Apache-2.0
def get_logged_eval_values(evaluation_run): """ If `evaluation_run` already exists, get the already logged values into a dictionary. """ logged_evaluation_values = defaultdict() if evaluation_run is not None: for row in evaluation_run.scan_history(): ...
If `evaluation_run` already exists, get the already logged values into a dictionary.
get_logged_eval_values
python
huggingface/smollm
vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
Apache-2.0
def get_evaluations_values_from_json(): """ Load all values from the json file """ evaluation_values = defaultdict(lambda: defaultdict()) for evaluation_jsonl_file in args.evaluation_jsonl_files: with open(evaluation_jsonl_file, "r") as f: for line in ...
Load all values from the json file
get_evaluations_values_from_json
python
huggingface/smollm
vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
Apache-2.0
def convert_training_run_to_dict(training_run): """ Get all the logged values from the training into a dictionary. """ training_history = training_run.scan_history() d = defaultdict(dict) for row in training_history: if "num_opt_steps" not in row: ...
Get all the logged values from the training into a dictionary.
convert_training_run_to_dict
python
huggingface/smollm
vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
https://github.com/huggingface/smollm/blob/master/vision/m4/evaluation/scripts/sync_evaluations_on_wandb.py
Apache-2.0
def from_pretrained(cls, *model_args, is_resume=False, new_model=False, **kwargs): """ Use this method when loading an already pretrained vloom model - either from a checkpoint or from hub. For creating an untrained model use `pretrained_models` instead. """ # config is: ...
Use this method when loading an already pretrained vloom model - either from a checkpoint or from hub. For creating an untrained model use `pretrained_models` instead.
from_pretrained
python
huggingface/smollm
vision/m4/models/custom_modules.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/custom_modules.py
Apache-2.0
def __init__( self, num_embeddings, num_additional_embeddings, embedding_dim, partially_freeze=False, device=None, dtype=None, padding_idx=None, **kwargs, ) -> None: """ num_additional_embeddings: int. Number of additional embed...
num_additional_embeddings: int. Number of additional embeddings. Only useful when you `partially_freeze=True`. partially_freeze: bool. If True, the regular `weight` will be frozen. `additional_weight` is never frozen. Note: there are a lot of other parameters to initialize a standard `nn.Embed...
__init__
python
huggingface/smollm
vision/m4/models/custom_modules.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/custom_modules.py
Apache-2.0
def forward(self, input_ids): """ we have 2 embeddings, with different indices - one pretrained self.weight and another self.additional_embedding.weight that is being trained. in order to make a lookup of the input ids, we: 1. find out the indices of the entries belonging to the...
we have 2 embeddings, with different indices - one pretrained self.weight and another self.additional_embedding.weight that is being trained. in order to make a lookup of the input ids, we: 1. find out the indices of the entries belonging to the 2nd embedding 2. extract those v...
forward
python
huggingface/smollm
vision/m4/models/custom_modules.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/custom_modules.py
Apache-2.0
def __init__( self, in_features: int, out_features: int, out_additional_features: int = 0, bias: bool = True, partially_freeze: bool = True, device=None, dtype=None, ) -> None: """ out_additional_features: int. Number of additional trai...
out_additional_features: int. Number of additional trainable dimensions. Only makes sense when `partially_freeze=True`. partially_freeze: bool. If True, the regular `weight` will be frozen and extra parameters (if any) will be trainable. If False, default to the regular behavior of nn.Linear.
__init__
python
huggingface/smollm
vision/m4/models/custom_modules.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/custom_modules.py
Apache-2.0
def extra_repr(self) -> str: """Overwriting `nn.Linear.extra_repr` to include new parameters.""" return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format( self.in_features, self.out_features, self.out_additional_feature...
Overwriting `nn.Linear.extra_repr` to include new parameters.
extra_repr
python
huggingface/smollm
vision/m4/models/custom_modules.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/custom_modules.py
Apache-2.0
def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) ...
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
to_dict
python
huggingface/smollm
vision/m4/models/idefics/configuration_idefics.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/idefics/configuration_idefics.py
Apache-2.0
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) mask_cond = torch.a...
Make causal mask used for bi-directional self-attention.
_make_causal_mask
python
huggingface/smollm
vision/m4/models/idefics/modeling_idefics.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/idefics/modeling_idefics.py
Apache-2.0
def rotate_half(x):
    """Rotates half the hidden dims of the input.

    Splits the last dimension in two and returns [-second_half, first_half]
    concatenated along that dimension (the rotation used by rotary embeddings).
    """
    half = x.shape[-1] // 2
    first, second = x[..., :half], x[..., half:]
    return torch.cat((-second, first), dim=-1)
Rotates half the hidden dims of the input.
rotate_half
python
huggingface/smollm
vision/m4/models/idefics/modeling_idefics.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/idefics/modeling_idefics.py
Apache-2.0
def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[boo...
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values...
forward
python
huggingface/smollm
vision/m4/models/idefics/modeling_idefics.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/idefics/modeling_idefics.py
Apache-2.0
def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, image_hidden_states: Optional[torch.Tensor] = None, image_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[b...
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values...
forward
python
huggingface/smollm
vision/m4/models/idefics/modeling_idefics.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/idefics/modeling_idefics.py
Apache-2.0
def tie_weights(self): """ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding. """ output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getattr(sel...
Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
tie_weights
python
huggingface/smollm
vision/m4/models/idefics/modeling_idefics.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/idefics/modeling_idefics.py
Apache-2.0
def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pix...
Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `...
forward
python
huggingface/smollm
vision/m4/models/idefics/modeling_idefics.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/idefics/modeling_idefics.py
Apache-2.0
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_...
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
repeat_kv
python
huggingface/smollm
vision/m4/models/perceiver/perceiver.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/perceiver/perceiver.py
Apache-2.0
def __init__(self, config) -> None: """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`""" super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.perceiver_config.resampler_n...
Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`
__init__
python
huggingface/smollm
vision/m4/models/perceiver/perceiver.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/perceiver/perceiver.py
Apache-2.0
def forward( self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cach...
Runs Perceiver Self-Attention, with special (context, latents) appended along the `seq` dimension! :param context: Tensor of shape [bsz, seq, embed_dim] representing long-form context to resample. :param latents: Tensor of shape [bsz, n_latents, embed_dim] representing fixed length latents to c...
forward
python
huggingface/smollm
vision/m4/models/perceiver/perceiver.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/perceiver/perceiver.py
Apache-2.0
def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, use_sliding_windows=False, ): """ Calls the forward method of Flash Attention - if the in...
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be pa...
_flash_attention_forward
python
huggingface/smollm
vision/m4/models/perceiver/perceiver.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/perceiver/perceiver.py
Apache-2.0
def forward( self, latents: torch.Tensor, context: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, ...
Args: latents (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` context (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(...
forward
python
huggingface/smollm
vision/m4/models/perceiver/perceiver.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/perceiver/perceiver.py
Apache-2.0
def __init__( self, config, ) -> None: """ Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then returns a...
Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet or ViT or MAE) of a given dimension, performs `depth` blocks of cross-attention with a fixed `n_latents` inputs, then returns a Tensor of shape [bsz, n_latents, embed_dim]. :param embed_dim...
__init__
python
huggingface/smollm
vision/m4/models/perceiver/perceiver.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/perceiver/perceiver.py
Apache-2.0
def retrieve_idx_closest_examples(ref_embedding, embeddings_to_compare, num_examples): "Returns the indices of the `num_examples` closest embeddings in ascending order" sim = np.dot(embeddings_to_compare, ref_embedding) # We can achieve linear complexity because we don't need to sort...
Returns the indices of the `num_examples` closest embeddings in ascending order
retrieve_idx_closest_examples
python
huggingface/smollm
vision/m4/models/vgpt2/evaluation_captioning_in_context_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/evaluation_captioning_in_context_vgpt2.py
Apache-2.0
def prepare_dataset(self, exs: Dict, **kwargs) -> Dict: """ Prepare batch of examples. Each example (X, y) where y is among (y1, y2, ..., yN) - the labels options - is turned into [(X, y1), (X, y2), ... (X, yN)]. """ support_dataset: Dataset = kwargs["support_dataset"] ...
Prepare batch of examples. Each example (X, y) where y is among (y1, y2, ..., yN) - the labels options - is turned into [(X, y1), (X, y2), ... (X, yN)].
prepare_dataset
python
huggingface/smollm
vision/m4/models/vgpt2/evaluation_classification_in_context_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/evaluation_classification_in_context_vgpt2.py
Apache-2.0
def retrieve_idx_closest_examples(ref_embedding, embeddings_to_compare, num_examples): "Returns the indices of the `num_examples` closest embeddings in ascending order" sim = np.dot(embeddings_to_compare, ref_embedding) # We can achieve linear complexity because we don't need to sort...
Returns the indices of the `num_examples` closest embeddings in ascending order
retrieve_idx_closest_examples
python
huggingface/smollm
vision/m4/models/vgpt2/evaluation_classification_in_context_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/evaluation_classification_in_context_vgpt2.py
Apache-2.0
def prepare_dataset(self, exs: Dict, **kwargs) -> Dict: """ Prepare batch of examples. Each example (X, y) where y is among (y1, y2, ..., yN) - the labels options - is turned into [(X, y1), (X, y2), ... (X, yN)]. """ support_dataset: Dataset = kwargs["support_dataset"] ...
Prepare batch of examples. Each example (X, y) where y is among (y1, y2, ..., yN) - the labels options - is turned into [(X, y1), (X, y2), ... (X, yN)].
prepare_dataset
python
huggingface/smollm
vision/m4/models/vgpt2/evaluation_classification_vqa_in_context_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/evaluation_classification_vqa_in_context_vgpt2.py
Apache-2.0
def retrieve_idx_closest_examples(ref_embedding, embeddings_to_compare, num_examples): "Returns the indices of the `num_examples` closest embeddings in ascending order" sim = np.dot(embeddings_to_compare, ref_embedding) # We can achieve linear complexity because we don't need to sort...
Returns the indices of the `num_examples` closest embeddings in ascending order
retrieve_idx_closest_examples
python
huggingface/smollm
vision/m4/models/vgpt2/evaluation_classification_vqa_in_context_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/evaluation_classification_vqa_in_context_vgpt2.py
Apache-2.0
def retrieve_idx_closest_examples(ref_embedding, embeddings_to_compare, num_examples): "Returns the indices of the `num_examples` closest embeddings in ascending order" sim = np.dot(embeddings_to_compare, ref_embedding) # We can achieve linear complexity because we don't need to sort...
Returns the indices of the `num_examples` closest embeddings in ascending order
retrieve_idx_closest_examples
python
huggingface/smollm
vision/m4/models/vgpt2/evaluation_open_ended_vqa_in_context_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/evaluation_open_ended_vqa_in_context_vgpt2.py
Apache-2.0
def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): """Load tf checkpoints in a pytorch model""" try: import re import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see...
Load tf checkpoints in a pytorch model
load_tf_weights_in_gpt2
python
huggingface/smollm
vision/m4/models/vgpt2/modeling_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/modeling_vgpt2.py
Apache-2.0
def tie_weights(self): """ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding. """ output_embeddings = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getattr(sel...
Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
tie_weights
python
huggingface/smollm
vision/m4/models/vgpt2/modeling_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/modeling_vgpt2.py
Apache-2.0
def forward( self, input_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] =...
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-10...
forward
python
huggingface/smollm
vision/m4/models/vgpt2/modeling_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/modeling_vgpt2.py
Apache-2.0
def _reorder_cache(past: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor) -> Tuple[Tuple[torch.Tensor]]: """ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_val...
This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct beam_idx at every generation step.
_reorder_cache
python
huggingface/smollm
vision/m4/models/vgpt2/modeling_vgpt2.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vgpt2/modeling_vgpt2.py
Apache-2.0
def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) ...
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
to_dict
python
huggingface/smollm
vision/m4/models/vllama3/configuration_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/configuration_vllama3.py
Apache-2.0
def _dynamic_frequency_update(self, position_ids, device): """ dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with ...
dynamic RoPE layers should recompute `inv_freq` in the following situations: 1 - growing beyond the cached sequence length (allow scaling) 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
_dynamic_frequency_update
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1)
Rotates half the hidden dims of the input.
rotate_half
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): """Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding....
Applies Rotary Position Embedding to the query and key tensors. Args: q (`torch.Tensor`): The query tensor. k (`torch.Tensor`): The key tensor. cos (`torch.Tensor`): The cosine part of the rotary embedding. sin (`torch.Tensor`): The sine part of the rotary embedding. positio...
apply_rotary_pos_emb
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_...
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
repeat_kv
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None ): """ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then com...
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be pa...
_flash_attention_forward
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Cache] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ...
Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, qu...
forward
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def get_lowest_module(module): if len(list(module.children())) == 0: # ...
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed.
enable_input_require_grads
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def inputs_merger( self, input_ids: torch.LongTensor = None, inputs_embeds: Optional[torch.Tensor] = None, image_hidden_states: Optional[torch.Tensor] = None, ): """ This method aims at merging the token embeddings with the image hidden states into one single sequence...
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM. The merging happens as follows: - The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_toke...
inputs_merger
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def get_lowest_module(module): if len(list(module.children())) == 0: # ...
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed.
enable_input_require_grads
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def tie_weights(self): """ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding. """ lm_head, additional_fc = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getat...
Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
tie_weights
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = Non...
Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `...
forward
python
huggingface/smollm
vision/m4/models/vllama3/modeling_vllama3.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vllama3/modeling_vllama3.py
Apache-2.0
def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) ...
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. Returns: `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
to_dict
python
huggingface/smollm
vision/m4/models/vmistral/configuration_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/configuration_vmistral.py
Apache-2.0
def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1)
Rotates half the hidden dims of the input.
rotate_half
python
huggingface/smollm
vision/m4/models/vmistral/modeling_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/modeling_vmistral.py
Apache-2.0
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: """ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) """ batch, num_key_value_...
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
repeat_kv
python
huggingface/smollm
vision/m4/models/vmistral/modeling_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/modeling_vmistral.py
Apache-2.0
def _flash_attention_forward( self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None, use_sliding_windows=False, ): """ Calls the forward method of Flash Attention - if the in...
Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token first unpad the input, then computes the attention scores and pad the final attention scores. Args: query_states (`torch.Tensor`): Input query states to be pa...
_flash_attention_forward
python
huggingface/smollm
vision/m4/models/vmistral/modeling_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/modeling_vmistral.py
Apache-2.0
def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def get_lowest_module(module): if len(list(module.children())) == 0: # ...
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed.
enable_input_require_grads
python
huggingface/smollm
vision/m4/models/vmistral/modeling_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/modeling_vmistral.py
Apache-2.0
def inputs_merger( self, input_ids: torch.LongTensor = None, inputs_embeds: Optional[torch.Tensor] = None, image_hidden_states: Optional[torch.Tensor] = None, ): """ This method aims at merging the token embeddings with the image hidden states into one single sequence...
This method aims at merging the token embeddings with the image hidden states into one single sequence of vectors that are fed to the transformer LM. The merging happens as follows: - The text token sequence is: `tok_1 tok_2 tok_3 <fake_token_around_image> <image> <image> ... <image> <fake_toke...
inputs_merger
python
huggingface/smollm
vision/m4/models/vmistral/modeling_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/modeling_vmistral.py
Apache-2.0
def enable_input_require_grads(self): """ Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed. """ def get_lowest_module(module): if len(list(module.children())) == 0: # ...
Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping the model weights fixed.
enable_input_require_grads
python
huggingface/smollm
vision/m4/models/vmistral/modeling_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/modeling_vmistral.py
Apache-2.0
def tie_weights(self): """ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding. """ lm_head, additional_fc = self.get_output_embeddings() input_embeddings = self.get_input_embeddings() if getat...
Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of DecoupledLinear and DecoupledEmbedding.
tie_weights
python
huggingface/smollm
vision/m4/models/vmistral/modeling_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/modeling_vmistral.py
Apache-2.0
def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, pix...
Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `...
forward
python
huggingface/smollm
vision/m4/models/vmistral/modeling_vmistral.py
https://github.com/huggingface/smollm/blob/master/vision/m4/models/vmistral/modeling_vmistral.py
Apache-2.0
def should_process(path, control_file_path, args): """Heuristics to decide whether to cleanup this opt_step-XXX checkpoint or not""" s3_completed_path = path / finished_uploading_file_name eval_completed_paths = [ path / "run_evals_0_shots_done", path / "run_evals_4_shots_done", pat...
Heuristics to decide whether to cleanup this opt_step-XXX checkpoint or not
should_process
python
huggingface/smollm
vision/m4/scripts/cleanup-checkpoints.py
https://github.com/huggingface/smollm/blob/master/vision/m4/scripts/cleanup-checkpoints.py
Apache-2.0
def should_process(path, force, control_file_path): """Heuristics to decide whether to convert this opt_step-XXX checkpoint or not""" target_dir = path / "unwrapped_model" config_file = target_dir / "config.json" # check if target directory exists if not target_dir.exists(): print(f"[N...
Heuristics to decide whether to convert this opt_step-XXX checkpoint or not
should_process
python
huggingface/smollm
vision/m4/scripts/convert-checkpoints.py
https://github.com/huggingface/smollm/blob/master/vision/m4/scripts/convert-checkpoints.py
Apache-2.0
def should_process(path, force, control_file_path, finished_uploading_file_path, args): """Heuristics to decide whether to upload this opt_step-XXX checkpoint or not""" # check if checkpoint is fully saved finished_saving_path = path / "finished-saving" # defined in from trainer.py if not finished_sav...
Heuristics to decide whether to upload this opt_step-XXX checkpoint or not
should_process
python
huggingface/smollm
vision/m4/scripts/s3-upload-checkpoints.py
https://github.com/huggingface/smollm/blob/master/vision/m4/scripts/s3-upload-checkpoints.py
Apache-2.0
def check_eval_crash(path): """Heuristics to decide whether to restart this opt_step-XXX checkpoint evaluation or not""" eval_start_paths = map( lambda x: path / x, [ "start_run_evals_0_shots", "start_run_evals_4_shots", "start_run_evals_perplexity_validation"...
Heuristics to decide whether to restart this opt_step-XXX checkpoint evaluation or not
check_eval_crash
python
huggingface/smollm
vision/m4/scripts/schedule-evals.py
https://github.com/huggingface/smollm/blob/master/vision/m4/scripts/schedule-evals.py
Apache-2.0
def _strip_html_tree(self, selectolax_tree): """ Strips all nodes with tags NOT in INTERESTING_TAGS_SET and has counterintuitively nothing to do with the STRIP_TAGS list """ strip_tags_l = [ node.tag for node in selectolax_tree.root.traverse() ...
Strips all nodes with tags NOT in INTERESTING_TAGS_SET and has counterintuitively nothing to do with the STRIP_TAGS list
_strip_html_tree
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/dom_tree_simplificator.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/dom_tree_simplificator.py
Apache-2.0
def _remove_empty_leaves(self, selectolax_tree): """ Function used to remove empty leaves iteratively, so it also ends up also removing nodes that are higher up in the tree. """ modification = True while modification: nodes_to_remove = [ node ...
Function used to remove empty leaves iteratively, so it also ends up also removing nodes that are higher up in the tree.
_remove_empty_leaves
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/dom_tree_simplificator.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/dom_tree_simplificator.py
Apache-2.0
def _get_clip_scores(self, media_info, image): """If possible, modifies `media_info`to add clip scores on available texts""" texts = [] for text_key in ["formatted_filename", "alt_text", "extracted_text"]: if text_key in media_info and media_info[text_key] != "": text...
If possible, modifies `media_info`to add clip scores on available texts
_get_clip_scores
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/pair_extractor.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/pair_extractor.py
Apache-2.0
def split_on_whitespace( text, new_line=False, tab=False, ): """This method also removes concatenated spaces.""" sep = [" "] + new_line * ["\n"] + tab * ["\t"] sep = "|".join(sep) split_text = re.split(sep, text) split_text = PairFiltering.remove_empty...
This method also removes concatenated spaces.
split_on_whitespace
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/pair_filtering.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/pair_filtering.py
Apache-2.0
def strip(text, strip_characters): """Way faster than text.strip(strip_characters) since strip_characters is a set instead of a str, and it contains a lot of elements (all the emojis).""" if not text: return text beg_ind = 0 end_ind = len(text) for i i...
Way faster than text.strip(strip_characters) since strip_characters is a set instead of a str, and it contains a lot of elements (all the emojis).
strip
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/pair_filtering.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/pair_filtering.py
Apache-2.0
def get_words_from_text(text, lower_case, strip_words, strip_characters): """Get words from a text. Non reversible since the text is split on multiple characters, words are stripped of special characters and characters are converted to lower case. Useful to compute ratios, like the stopw...
Get words from a text. Non reversible since the text is split on multiple characters, words are stripped of special characters and characters are converted to lower case. Useful to compute ratios, like the stopword ratio.
get_words_from_text
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/pair_filtering.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/pair_filtering.py
Apache-2.0
def split_on_whitespace( text, new_line=False, tab=False, ): """This method also removes concatenated spaces.""" sep = [" "] + new_line * ["\n"] + tab * ["\t"] sep = "|".join(sep) split_text = re.split(sep, text) split_text = FilteringFunctions.remove_...
This method also removes concatenated spaces.
split_on_whitespace
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/web_document_filtering.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/web_document_filtering.py
Apache-2.0
def strip(text, strip_characters): """Way faster than text.strip(strip_characters) since strip_characters is a set instead of a str, and it contains a lot of elements (all the emojis).""" if not text: return text beg_ind = 0 end_ind = len(text) for i i...
Way faster than text.strip(strip_characters) since strip_characters is a set instead of a str, and it contains a lot of elements (all the emojis).
strip
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/web_document_filtering.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/web_document_filtering.py
Apache-2.0
def get_words_from_text(text, lower_case=True, strip_words=True, strip_characters=SPECIAL_CHARACTERS): """Get words from a text. Non reversible since the text is split on multiple characters, words are stripped of special characters and characters are converted to lower case. Useful to c...
Get words from a text. Non reversible since the text is split on multiple characters, words are stripped of special characters and characters are converted to lower case. Useful to compute ratios, like the stopword ratio.
get_words_from_text
python
huggingface/smollm
vision/m4/sourcing/data_collection/processors/web_document_filtering.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/processors/web_document_filtering.py
Apache-2.0
def compute_clip_score(texts, image, num_max_words=NUM_MAX_WORDS): """ Args texts: List[str] images: (:obj:`PIL.Image.Image`, :obj:`np.ndarray`, :obj:`torch.Tensor`): The image to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case o...
Args texts: List[str] images: (:obj:`PIL.Image.Image`, :obj:`np.ndarray`, :obj:`torch.Tensor`): The image to be prepared. Each image can be a PIL image, NumPy array or PyTorch tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), w...
compute_clip_score
python
huggingface/smollm
vision/m4/sourcing/data_collection/utils/clip_utils.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/utils/clip_utils.py
Apache-2.0
def check_image_quality(media_info): """ Args_ : Media Node Returns : img_has_good_quality: Boolean indictating there is an image with good quality (defined by its height, width, and aspect ratio) w: image width h: image height """ w, h = media_info["original_widt...
Args_ : Media Node Returns : img_has_good_quality: Boolean indictating there is an image with good quality (defined by its height, width, and aspect ratio) w: image width h: image height
check_image_quality
python
huggingface/smollm
vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
Apache-2.0
def check_text(media_info): """ Args_ : Media Node Returns : has_text: Boolean indictating if there is a text that corresponds to the media txt_dict: Dictionary mapping each text_length to its text type (filename, alt-text, extracted_text) Note: All variables are set ...
Args_ : Media Node Returns : has_text: Boolean indictating if there is a text that corresponds to the media txt_dict: Dictionary mapping each text_length to its text type (filename, alt-text, extracted_text) Note: All variables are set to 0 if they don't exist in the med...
check_text
python
huggingface/smollm
vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
Apache-2.0
def check_CLIP(media_info): """ Args_ : Media Node Returns : clip_score_max_per_img: Max CLIP score per Image clip_nbr_per_img: Number of CLIP scores for a given image clip_dict: Dictionary mapping each CLIP score to its text type (filename, alt-text, extracted_text). ...
Args_ : Media Node Returns : clip_score_max_per_img: Max CLIP score per Image clip_nbr_per_img: Number of CLIP scores for a given image clip_dict: Dictionary mapping each CLIP score to its text type (filename, alt-text, extracted_text). Note: All variables ar...
check_CLIP
python
huggingface/smollm
vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
Apache-2.0
def update_df_metrics_and_lists_for_extraction_method( media_info, aggregate_metrics_df, image_centric_df, text_centric_df, extraction_method_name ): """_summary_ Given a Media_Node and the Extraction_Method_Name used to get this Media_Node, this function uses the Media_Node's values to update the 2D Da...
_summary_ Given a Media_Node and the Extraction_Method_Name used to get this Media_Node, this function uses the Media_Node's values to update the 2D Dataframes' numbers and append values to the 3D Dataframes' lists.
update_df_metrics_and_lists_for_extraction_method
python
huggingface/smollm
vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
Apache-2.0
def get_extraction_evaluation_metrics( num_docs_to_consider=100, use_clip_scores=True, ): """_summary_ Args: num_docs_to_consider (int, optional): _description_. Defaults to 100. use_clip_scores (bool, optional): _description_. Defaults to True. Returns: _type_: _descriptio...
_summary_ Args: num_docs_to_consider (int, optional): _description_. Defaults to 100. use_clip_scores (bool, optional): _description_. Defaults to True. Returns: _type_: _description_
get_extraction_evaluation_metrics
python
huggingface/smollm
vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
Apache-2.0
def display_bar_charts(self, header, list_metric_to_compare): """ Given a list of metrics to compare, makes one bar chart per metric and compares over all extraction methods. Each bar chart has its own column, so it is better to put no more than 3 metrics. """ charts = []...
Given a list of metrics to compare, makes one bar chart per metric and compares over all extraction methods. Each bar chart has its own column, so it is better to put no more than 3 metrics.
display_bar_charts
python
huggingface/smollm
vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/data_collection/visualization/pair_stat_dashboard.py
Apache-2.0
def cached_path( url_or_filename, compute_cache_path, download_config=None, **download_kwargs, ) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's alr...
Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Return: Local path (string) Raises:...
cached_path
python
huggingface/smollm
vision/m4/sourcing/pmd/cache_path.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/pmd/cache_path.py
Apache-2.0
def get_from_cache( url, compute_cache_path, cache_dir=None, force_download=False, proxies=None, etag_timeout=10.0, # reduce timeout resume_download=False, user_agent=None, local_files_only=False, use_etag=True, max_retries=0, use_auth_token=None, ignore_url_params=F...
Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the path to the cached file. Return: Local path (string) Raises: FileNotFoundError: in case of non-recoverable file (non-existent or no cache on disk) Conne...
get_from_cache
python
huggingface/smollm
vision/m4/sourcing/pmd/cache_path.py
https://github.com/huggingface/smollm/blob/master/vision/m4/sourcing/pmd/cache_path.py
Apache-2.0