Dataset columns: id (int64, 0 to 190k), prompt (string, lengths 21 to 13.4M), docstring (string, lengths 1 to 12k)
11,489
import json import os import re import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging The provided code snippet includes necessary dependencies for implementing the `replace_unicode_punct` function. Write a Python function `def replace_unicode_punct(text)` to solve the following problem: Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl Here is the function: def replace_unicode_punct(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl """ text = text.replace("，", ",") text = re.sub(r"。\s*", ". ", text) text = text.replace("、", ",") text = text.replace("”", '"') text = text.replace("“", '"') text = text.replace("∶", ":") text = text.replace("：", ":") text = text.replace("？", "?") text = text.replace("《", '"') text = text.replace("》", '"') text = text.replace("）", ")") text = text.replace("！", "!") text = text.replace("（", "(") text = text.replace("；", ";") text = text.replace("１", "1") text = text.replace("」", '"') text = text.replace("「", '"') text = text.replace("０", "0") text = text.replace("３", "3") text = text.replace("２", "2") text = text.replace("５", "5") text = text.replace("６", "6") text = text.replace("９", "9") text = text.replace("７", "7") text = text.replace("８", "8") text = text.replace("４", "4") text = re.sub(r"．\s*", ". ", text) text = text.replace("～", "~") text = text.replace("’", "'") text = text.replace("…", "...") text = text.replace("━", "-") text = text.replace("〈", "<") text = text.replace("〉", ">") text = text.replace("【", "[") text = text.replace("】", "]") text = text.replace("％", "%") return text
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
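A quick, hypothetical call to the `replace_unicode_punct` function from this row; the input string is illustrative and not taken from the dataset.

```python
# Fullwidth/CJK punctuation is mapped to its ASCII equivalent.
print(replace_unicode_punct("【注意】これは、テストです。"))
# -> "[注意]これは,テストです. "  (the 。 substitution leaves a trailing space)
```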
11,490
import json import os import re import unicodedata from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging The provided code snippet includes necessary dependencies for implementing the `remove_non_printing_char` function. Write a Python function `def remove_non_printing_char(text)` to solve the following problem: Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl Here is the function: def remove_non_printing_char(text): """ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl """ output = [] for char in text: cat = unicodedata.category(char) if cat.startswith("C"): continue output.append(char) return "".join(output)
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
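A short, hypothetical usage sketch for `remove_non_printing_char` from this row: characters whose Unicode category starts with "C" (control, format, and similar) are dropped.

```python
# The zero-width space (U+200B, category Cf) and the newline (category Cc) are removed.
print(remove_non_printing_char("hello\u200bworld\n"))
# -> "helloworld"
```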
11,491
import itertools import random import warnings from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutput, TFMultipleChoiceModelOutput, TFQuestionAnsweringModelOutput, TFSequenceClassifierOutput, TFTokenClassifierOutput, ) from ...modeling_tf_utils import ( TFModelInputType, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, TFTokenClassificationLoss, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_flaubert import FlaubertConfig def shape_list(tensor: Union[tf.Tensor, np.ndarray]) -> List[int]: """ Deal with dynamic shape in tensorflow cleanly. Args: tensor (`tf.Tensor` or `np.ndarray`): The tensor we want the shape of. Returns: `List[int]`: The shape of the tensor as a list. """ if isinstance(tensor, np.ndarray): return list(tensor.shape) dynamic = tf.shape(tensor) if tensor.shape == tf.TensorShape(None): return dynamic static = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(static)] The provided code snippet includes necessary dependencies for implementing the `get_masks` function. Write a Python function `def get_masks(slen, lengths, causal, padding_mask=None)` to solve the following problem: Generate hidden states mask, and optionally an attention mask. Here is the function: def get_masks(slen, lengths, causal, padding_mask=None): """ Generate hidden states mask, and optionally an attention mask. """ bs = shape_list(lengths)[0] if padding_mask is not None: mask = padding_mask else: # assert lengths.max().item() <= slen alen = tf.range(slen, dtype=lengths.dtype) mask = alen < tf.expand_dims(lengths, axis=1) # attention mask is the same as mask, or triangular inferior attention (causal) if causal: attn_mask = tf.less_equal( tf.tile(tf.reshape(alen, (1, 1, slen)), (bs, slen, 1)), tf.reshape(alen, (1, slen, 1)) ) else: attn_mask = mask # sanity check # assert shape_list(mask) == [bs, slen] tf.debugging.assert_equal(shape_list(mask), [bs, slen]) if causal: tf.debugging.assert_equal(shape_list(attn_mask), [bs, slen, slen]) return mask, attn_mask
Generate hidden states mask, and optionally an attention mask.
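A minimal sketch of calling `get_masks` from this row, assuming TensorFlow is available; the batch size and lengths are made up.

```python
import tensorflow as tf

lengths = tf.constant([3, 5])  # two sequences padded to length 5
mask, attn_mask = get_masks(slen=5, lengths=lengths, causal=True)
# mask: shape (2, 5), True for real tokens and False for padding.
# attn_mask: shape (2, 5, 5), lower-triangular so position i only attends to positions <= i.
```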
11,492
import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_donut_swin import DonutSwinConfig The provided code snippet includes necessary dependencies for implementing the `window_partition` function. Write a Python function `def window_partition(input_feature, window_size)` to solve the following problem: Partitions the given input into windows. Here is the function: def window_partition(input_feature, window_size): """ Partitions the given input into windows. """ batch_size, height, width, num_channels = input_feature.shape input_feature = input_feature.view( batch_size, height // window_size, window_size, width // window_size, window_size, num_channels ) windows = input_feature.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, num_channels) return windows
Partitions the given input into windows.
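A minimal sketch of `window_partition` from this row, using an arbitrary 8x8 feature map and window size 4.

```python
import torch

feature = torch.randn(1, 8, 8, 3)  # (batch, height, width, channels)
windows = window_partition(feature, window_size=4)
print(windows.shape)  # torch.Size([4, 4, 4, 3]): 2x2 windows per image
```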
11,493
import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_donut_swin import DonutSwinConfig The provided code snippet includes necessary dependencies for implementing the `window_reverse` function. Write a Python function `def window_reverse(windows, window_size, height, width)` to solve the following problem: Merges windows to produce higher resolution features. Here is the function: def window_reverse(windows, window_size, height, width): """ Merges windows to produce higher resolution features. """ num_channels = windows.shape[-1] windows = windows.view(-1, height // window_size, width // window_size, window_size, window_size, num_channels) windows = windows.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, height, width, num_channels) return windows
Merges windows to produce higher resolution features.
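A round-trip sketch combining this row's `window_reverse` with `window_partition` from the previous row; the shapes are arbitrary.

```python
import torch

feature = torch.randn(1, 8, 8, 3)
windows = window_partition(feature, window_size=4)  # (4, 4, 4, 3)
restored = window_reverse(windows, window_size=4, height=8, width=8)
print(torch.equal(restored, feature))  # True: partition then merge is lossless
```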
11,494
import collections.abc import math from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_utils import PreTrainedModel from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, ) from .configuration_donut_swin import DonutSwinConfig The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True)` to solve the following problem: Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. Here is the function: def drop_path(input, drop_prob=0.0, training=False, scale_by_keep=True): """ Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0.0 or not training: return input keep_prob = 1 - drop_prob shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device) random_tensor.floor_() # binarize output = input.div(keep_prob) * random_tensor return output
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). Comment by Ross Wightman: This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
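A small illustrative call to `drop_path` from this row; the tensor contents are arbitrary.

```python
import torch

x = torch.ones(4, 3, 3)
out = drop_path(x, drop_prob=0.5, training=True)
# Each of the 4 samples is either zeroed out or rescaled to 1 / keep_prob = 2.0,
# so rows of `out` are all 0.0 or all 2.0; with training=False, x is returned unchanged.
```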
11,495
import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutFeatureExtractor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def get_configs(model): def convert_state_dict(orig_state_dict, model): def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False): # load original model original_model = DonutModel.from_pretrained(model_name).eval() # load HuggingFace model encoder_config, decoder_config = get_configs(original_model) encoder = DonutSwinModel(encoder_config) decoder = MBartForCausalLM(decoder_config) model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder) model.eval() state_dict = original_model.state_dict() new_state_dict = convert_state_dict(state_dict, model) model.load_state_dict(new_state_dict) # verify results on scanned document dataset = load_dataset("hf-internal-testing/example-documents") image = dataset["test"][0]["image"].convert("RGB") tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True) feature_extractor = DonutFeatureExtractor( do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1] ) processor = DonutProcessor(feature_extractor, tokenizer) pixel_values = processor(image, return_tensors="pt").pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" question = "When is the coffee break?" task_prompt = task_prompt.replace("{user_input}", question) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": task_prompt = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: task_prompt = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": task_prompt = "s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": task_prompt = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt task_prompt = "hello world" else: raise ValueError("Model name not supported") prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[ "input_ids" ] original_patch_embed = original_model.encoder.model.patch_embed(pixel_values) patch_embeddings, _ = model.encoder.embeddings(pixel_values) assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3) # verify encoder hidden states original_last_hidden_state = original_model.encoder(pixel_values) last_hidden_state = model.encoder(pixel_values).last_hidden_state assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2) # verify decoder hidden states original_logits = original_model(pixel_values, prompt_tensors, None).logits logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits assert torch.allclose(original_logits, logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model") processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
null
11,496
import dataclasses import json import sys from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Dict, Iterable, NewType, Optional, Tuple, Union, get_type_hints import yaml def string_to_bool(v): if isinstance(v, bool): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)." )
null
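Illustrative behavior of `string_to_bool` from this row.

```python
string_to_bool("yes")    # True
string_to_bool("0")      # False
string_to_bool(True)     # True (bool inputs pass through unchanged)
string_to_bool("maybe")  # raises ArgumentTypeError
```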
11,497
from ..utils import DummyObject, requires_backends def top_k_top_p_filtering(*args, **kwargs): requires_backends(top_k_top_p_filtering, ["torch"])
null
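The rows from 11,497 onward are dummy placeholder objects: when a backend such as torch is missing, transformers exposes these stubs instead of the real symbols, and `requires_backends` raises an error when they are used. A hedged sketch, assuming an environment without torch:

```python
try:
    top_k_top_p_filtering()  # the dummy stub shown above
except ImportError as err:
    print(err)  # message explaining that the "torch" backend must be installed
```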
11,498
from ..utils import DummyObject, requires_backends def load_tf_weights_in_albert(*args, **kwargs): requires_backends(load_tf_weights_in_albert, ["torch"])
null
11,499
from ..utils import DummyObject, requires_backends def load_tf_weights_in_bert(*args, **kwargs): requires_backends(load_tf_weights_in_bert, ["torch"])
null
11,500
from ..utils import DummyObject, requires_backends def load_tf_weights_in_bert_generation(*args, **kwargs): requires_backends(load_tf_weights_in_bert_generation, ["torch"])
null
11,501
from ..utils import DummyObject, requires_backends def load_tf_weights_in_big_bird(*args, **kwargs): requires_backends(load_tf_weights_in_big_bird, ["torch"])
null
11,502
from ..utils import DummyObject, requires_backends def load_tf_weights_in_canine(*args, **kwargs): requires_backends(load_tf_weights_in_canine, ["torch"])
null
11,503
from ..utils import DummyObject, requires_backends def load_tf_weights_in_convbert(*args, **kwargs): requires_backends(load_tf_weights_in_convbert, ["torch"])
null
11,504
from ..utils import DummyObject, requires_backends def load_tf_weights_in_electra(*args, **kwargs): requires_backends(load_tf_weights_in_electra, ["torch"])
null
11,505
from ..utils import DummyObject, requires_backends def load_tf_weights_in_funnel(*args, **kwargs): requires_backends(load_tf_weights_in_funnel, ["torch"])
null
11,506
from ..utils import DummyObject, requires_backends def load_tf_weights_in_gpt2(*args, **kwargs): requires_backends(load_tf_weights_in_gpt2, ["torch"])
null
11,507
from ..utils import DummyObject, requires_backends def load_tf_weights_in_gpt_neo(*args, **kwargs): requires_backends(load_tf_weights_in_gpt_neo, ["torch"])
null
11,508
from ..utils import DummyObject, requires_backends def load_tf_weights_in_imagegpt(*args, **kwargs): requires_backends(load_tf_weights_in_imagegpt, ["torch"])
null
11,509
from ..utils import DummyObject, requires_backends def load_tf_weights_in_mobilebert(*args, **kwargs): requires_backends(load_tf_weights_in_mobilebert, ["torch"])
null
11,510
from ..utils import DummyObject, requires_backends def load_tf_weights_in_openai_gpt(*args, **kwargs): requires_backends(load_tf_weights_in_openai_gpt, ["torch"])
null
11,511
from ..utils import DummyObject, requires_backends def load_tf_weights_in_qdqbert(*args, **kwargs): requires_backends(load_tf_weights_in_qdqbert, ["torch"])
null
11,512
from ..utils import DummyObject, requires_backends def load_tf_weights_in_realm(*args, **kwargs): requires_backends(load_tf_weights_in_realm, ["torch"])
null
11,513
from ..utils import DummyObject, requires_backends def load_tf_weights_in_rembert(*args, **kwargs): requires_backends(load_tf_weights_in_rembert, ["torch"])
null
11,514
from ..utils import DummyObject, requires_backends def load_tf_weights_in_roformer(*args, **kwargs): requires_backends(load_tf_weights_in_roformer, ["torch"])
null
11,515
from ..utils import DummyObject, requires_backends def load_tf_weights_in_t5(*args, **kwargs): requires_backends(load_tf_weights_in_t5, ["torch"])
null
11,516
from ..utils import DummyObject, requires_backends def load_tf_weights_in_transfo_xl(*args, **kwargs): requires_backends(load_tf_weights_in_transfo_xl, ["torch"])
null
11,517
from ..utils import DummyObject, requires_backends def load_tf_weights_in_xlnet(*args, **kwargs): requires_backends(load_tf_weights_in_xlnet, ["torch"])
null
11,518
from ..utils import DummyObject, requires_backends def get_constant_schedule(*args, **kwargs): requires_backends(get_constant_schedule, ["torch"])
null
11,519
from ..utils import DummyObject, requires_backends def get_constant_schedule_with_warmup(*args, **kwargs): requires_backends(get_constant_schedule_with_warmup, ["torch"])
null
11,520
from ..utils import DummyObject, requires_backends def get_cosine_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_schedule_with_warmup, ["torch"])
null
11,521
from ..utils import DummyObject, requires_backends def get_cosine_with_hard_restarts_schedule_with_warmup(*args, **kwargs): requires_backends(get_cosine_with_hard_restarts_schedule_with_warmup, ["torch"])
null
11,522
from ..utils import DummyObject, requires_backends def get_linear_schedule_with_warmup(*args, **kwargs): requires_backends(get_linear_schedule_with_warmup, ["torch"])
null
11,523
from ..utils import DummyObject, requires_backends def get_polynomial_decay_schedule_with_warmup(*args, **kwargs): requires_backends(get_polynomial_decay_schedule_with_warmup, ["torch"])
null
11,524
from ..utils import DummyObject, requires_backends def get_scheduler(*args, **kwargs): requires_backends(get_scheduler, ["torch"])
null
11,525
from ..utils import DummyObject, requires_backends def apply_chunking_to_forward(*args, **kwargs): requires_backends(apply_chunking_to_forward, ["torch"])
null
11,526
from ..utils import DummyObject, requires_backends def prune_layer(*args, **kwargs): requires_backends(prune_layer, ["torch"])
null
11,527
from ..utils import DummyObject, requires_backends def torch_distributed_zero_first(*args, **kwargs): requires_backends(torch_distributed_zero_first, ["torch"])
null
11,528
from ..utils import DummyObject, requires_backends def rescale(*args, **kwargs): requires_backends(rescale, ["vision"])
null
11,529
from ..utils import DummyObject, requires_backends def resize(*args, **kwargs): requires_backends(resize, ["vision"])
null
11,530
from ..utils import DummyObject, requires_backends def to_pil_image(*args, **kwargs): requires_backends(to_pil_image, ["vision"])
null
11,531
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https")
null
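Illustrative calls to `is_remote_url` from this row.

```python
is_remote_url("https://huggingface.co/bert-base-uncased/resolve/main/config.json")  # True
is_remote_url("/home/user/models/config.json")                                      # False
```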
11,532
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) if ( os.path.isdir(old_default_cache_path) and not os.path.isdir(default_cache_path) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( "In Transformers v4.0.0, the default path to cache downloaded models changed from" " '~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have" " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" " '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should" " only see this message once." ) shutil.move(old_default_cache_path, default_cache_path) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", HUGGINGFACE_HUB_CACHE) if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: warnings.warn( "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " "Transformers v5. Use `HF_ENDPOINT` instead.", FutureWarning, ) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: cache_version = int(f.read()) The provided code snippet includes necessary dependencies for implementing the `get_cached_models` function. Write a Python function `def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]` to solve the following problem: Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, etag, size_MB)`. Filenames in `cache_dir` are use to get the metadata for each model, only urls ending with *.bin* are added. Args: cache_dir (`Union[str, Path]`, *optional*): The cache directory to search for models within. Will default to the transformers cache if unset. Returns: List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)` Here is the function: def get_cached_models(cache_dir: Union[str, Path] = None) -> List[Tuple]: """ Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, etag, size_MB)`. Filenames in `cache_dir` are use to get the metadata for each model, only urls ending with *.bin* are added. Args: cache_dir (`Union[str, Path]`, *optional*): The cache directory to search for models within. Will default to the transformers cache if unset. 
Returns: List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)` """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE elif isinstance(cache_dir, Path): cache_dir = str(cache_dir) if not os.path.isdir(cache_dir): return [] cached_models = [] for file in os.listdir(cache_dir): if file.endswith(".json"): meta_path = os.path.join(cache_dir, file) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] if url.endswith(".bin"): size_MB = os.path.getsize(meta_path.strip(".json")) / 1e6 cached_models.append((url, etag, size_MB)) return cached_models
Returns a list of tuples representing model binaries that are cached locally. Each tuple has shape `(model_url, etag, size_MB)`. Filenames in `cache_dir` are use to get the metadata for each model, only urls ending with *.bin* are added. Args: cache_dir (`Union[str, Path]`, *optional*): The cache directory to search for models within. Will default to the transformers cache if unset. Returns: List[Tuple]: List of tuples each with shape `(model_url, etag, size_MB)`
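A hedged usage sketch for `get_cached_models`: it assumes the legacy transformers cache layout in which each downloaded file has a sidecar `.json` metadata file.

```python
for url, etag, size_mb in get_cached_models():
    print(f"{url} ({size_mb:.1f} MB, etag={etag})")
```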
11,533
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) The provided code snippet includes necessary dependencies for implementing the `extract_commit_hash` function. Write a Python function `def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str])` to solve the following problem: Extracts the commit hash from a resolved filename toward a cache file. Here is the function: def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str]): """ Extracts the commit hash from a resolved filename toward a cache file. """ if resolved_file is None or commit_hash is not None: return commit_hash resolved_file = str(Path(resolved_file).as_posix()) search = re.search(r"snapshots/([^/]+)/", resolved_file) if search is None: return None commit_hash = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
Extracts the commit hash from a resolved filename toward a cache file.
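An illustrative call to `extract_commit_hash`; the cache path below is made up but follows the `snapshots/<commit>/...` layout the regex expects.

```python
path = "/root/.cache/huggingface/hub/models--bert-base-uncased/snapshots/0a6aa9128b6194f4f3c4db429b6cb4891cdb421b/config.json"
extract_commit_hash(path, None)   # -> "0a6aa9128b6194f4f3c4db429b6cb4891cdb421b"
extract_commit_hash(path, "abc")  # -> "abc" (an explicit commit hash is returned as-is)
```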
11,534
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) if ( os.path.isdir(old_default_cache_path) and not os.path.isdir(default_cache_path) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( "In Transformers v4.0.0, the default path to cache downloaded models changed from" " '~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have" " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" " '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should" " only see this message once." ) shutil.move(old_default_cache_path, default_cache_path) if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: warnings.warn( "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " "Transformers v5. Use `HF_ENDPOINT` instead.", FutureWarning, ) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) def cached_file( path_or_repo_id: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", user_agent: Optional[Union[str, Dict[str, str]]] = None, _raise_exceptions_for_missing_entries: bool = True, _raise_exceptions_for_connection_errors: bool = True, _commit_hash: Optional[str] = None, ): """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo_id (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. 
proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo). Examples: ```python # Download a model weight from the Hub and cache it. model_weights_file = cached_file("bert-base-uncased", "pytorch_model.bin") ```""" # Private arguments # _raise_exceptions_for_missing_entries: if False, do not raise an exception for missing entries but return # None. # _raise_exceptions_for_connection_errors: if False, do not raise an exception for connection errors but return # None. # _commit_hash: passed when we are chaining several calls to various files (e.g. when loading a tokenizer or # a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache. if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if subfolder is None: subfolder = "" path_or_repo_id = str(path_or_repo_id) full_filename = os.path.join(subfolder, filename) if os.path.isdir(path_or_repo_id): resolved_file = os.path.join(os.path.join(path_or_repo_id, subfolder), filename) if not os.path.isfile(resolved_file): if _raise_exceptions_for_missing_entries: raise EnvironmentError( f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files." ) else: return None return resolved_file if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if _commit_hash is not None: # If the file is cached under that commit hash, we return it directly. 
resolved_file = try_to_load_from_cache( path_or_repo_id, full_filename, cache_dir=cache_dir, revision=_commit_hash ) if resolved_file is not None: if resolved_file is not _CACHED_NO_EXIST: return resolved_file elif not _raise_exceptions_for_missing_entries: return None else: raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.") user_agent = http_user_agent(user_agent) try: # Load from URL or cache if already cached resolved_file = hf_hub_download( path_or_repo_id, filename, subfolder=None if len(subfolder) == 0 else subfolder, revision=revision, cache_dir=cache_dir, user_agent=user_agent, force_download=force_download, proxies=proxies, resume_download=resume_download, use_auth_token=use_auth_token, local_files_only=local_files_only, ) except RepositoryNotFoundError: raise EnvironmentError( f"{path_or_repo_id} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to " "pass a token having permission to this repo with `use_auth_token` or log in with " "`huggingface-cli login` and pass `use_auth_token=True`." ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists " "for this model name. Check the model page at " f"'https://huggingface.co/{path_or_repo_id}' for available revisions." ) except LocalEntryNotFoundError: # We try to see if we have a cached version (not up to date): resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) if resolved_file is not None and resolved_file != _CACHED_NO_EXIST: return resolved_file if not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors: return None raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the" f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named" f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) except EntryNotFoundError: if not _raise_exceptions_for_missing_entries: return None if revision is None: revision = "main" raise EnvironmentError( f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files." ) except HTTPError as err: # First we try to see if we have a cached version (not up to date): resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) if resolved_file is not None and resolved_file != _CACHED_NO_EXIST: return resolved_file if not _raise_exceptions_for_connection_errors: return None raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}") return resolved_file if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: cache_version = int(f.read()) The provided code snippet includes necessary dependencies for implementing the `get_file_from_repo` function. 
Write a Python function `def get_file_from_repo( path_or_repo: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", )` to solve the following problem: Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the file does not exist. Examples: ```python # Download a tokenizer configuration from huggingface.co and cache. tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json") # This model does not have a tokenizer config so the result will be None. tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json") ``` Here is the function: def get_file_from_repo( path_or_repo: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", ): """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. 
- a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the file does not exist. Examples: ```python # Download a tokenizer configuration from huggingface.co and cache. tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json") # This model does not have a tokenizer config so the result will be None. tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json") ```""" return cached_file( path_or_repo_id=path_or_repo, filename=filename, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, subfolder=subfolder, _raise_exceptions_for_missing_entries=False, _raise_exceptions_for_connection_errors=False, )
Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo) or `None` if the file does not exist. Examples: ```python # Download a tokenizer configuration from huggingface.co and cache. tokenizer_config = get_file_from_repo("bert-base-uncased", "tokenizer_config.json") # This model does not have a tokenizer config so the result will be None. tokenizer_config = get_file_from_repo("xlm-roberta-base", "tokenizer_config.json") ```
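A minimal sketch of `get_file_from_repo` based on the docstring above: unlike `cached_file` with default flags, it returns `None` instead of raising when the file is missing.

```python
config_path = get_file_from_repo("bert-base-uncased", "config.json")      # path to the cached file
missing = get_file_from_repo("bert-base-uncased", "does_not_exist.json")  # None
```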
11,535
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) The provided code snippet includes necessary dependencies for implementing the `download_url` function. Write a Python function `def download_url(url, proxies=None)` to solve the following problem: Downloads a given url in a temporary file. This function is not safe to use in multiple processes. Its only use is for deprecated behavior allowing to download config/models with a single url instead of using the Hub. Args: url (`str`): The url of the file to download. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. Returns: `str`: The location of the temporary file where the url was downloaded. Here is the function: def download_url(url, proxies=None): """ Downloads a given url in a temporary file. This function is not safe to use in multiple processes. Its only use is for deprecated behavior allowing to download config/models with a single url instead of using the Hub. Args: url (`str`): The url of the file to download. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. Returns: `str`: The location of the temporary file where the url was downloaded. """ warnings.warn( f"Using `from_pretrained` with the url of a file (here {url}) is deprecated and won't be possible anymore in" " v5 of Transformers. You should host your file on the Hub (hf.co) instead and use the repository ID. Note" " that this is not compatible with the caching system (your file will be downloaded at each execution) or" " multiple processes (each process will download the file in a different temporary file)." ) tmp_file = tempfile.mktemp() with open(tmp_file, "wb") as f: http_get(url, f, proxies=proxies) return tmp_file
Downloads a given url in a temporary file. This function is not safe to use in multiple processes. Its only use is for deprecated behavior allowing to download config/models with a single url instead of using the Hub. Args: url (`str`): The url of the file to download. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. Returns: `str`: The location of the temporary file where the url was downloaded.
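A hedged sketch of the deprecated `download_url` path: the file is written to a fresh temporary location on every call, with no caching.

```python
tmp_path = download_url("https://huggingface.co/bert-base-uncased/resolve/main/config.json")
```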
11,536
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) logger = logging.get_logger(__name__) if ( os.path.isdir(old_default_cache_path) and not os.path.isdir(default_cache_path) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( "In Transformers v4.0.0, the default path to cache downloaded models changed from" " '~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have" " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" " '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should" " only see this message once." ) shutil.move(old_default_cache_path, default_cache_path) if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: warnings.warn( "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " "Transformers v5. Use `HF_ENDPOINT` instead.", FutureWarning, ) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: """ Formats a user-agent string with basic info about a request. """ ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_tf_available(): ua += f"; tensorflow/{_tf_version}" if DISABLE_TELEMETRY: return ua + "; telemetry/off" if is_training_run_on_sagemaker(): ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items()) # CI will set this value to True if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: cache_version = int(f.read()) The provided code snippet includes necessary dependencies for implementing the `has_file` function. Write a Python function `def has_file( path_or_repo: Union[str, os.PathLike], filename: str, revision: Optional[str] = None, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, )` to solve the following problem: Checks if a repo contains a given file wihtout downloading it. Works for remote repos and local folders. 
<Tip warning={false}> This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors. </Tip> Here is the function: def has_file( path_or_repo: Union[str, os.PathLike], filename: str, revision: Optional[str] = None, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, ): """ Checks if a repo contains a given file wihtout downloading it. Works for remote repos and local folders. <Tip warning={false}> This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors. </Tip> """ if os.path.isdir(path_or_repo): return os.path.isfile(os.path.join(path_or_repo, filename)) url = hf_hub_url(path_or_repo, filename=filename, revision=revision) headers = {"user-agent": http_user_agent()} if isinstance(use_auth_token, str): headers["authorization"] = f"Bearer {use_auth_token}" elif use_auth_token: token = HfFolder.get_token() if token is None: raise EnvironmentError("You specified use_auth_token=True, but a huggingface token was not found.") headers["authorization"] = f"Bearer {token}" r = requests.head(url, headers=headers, allow_redirects=False, proxies=proxies, timeout=10) try: hf_raise_for_status(r) return True except RepositoryNotFoundError as e: logger.error(e) raise EnvironmentError(f"{path_or_repo} is not a local folder or a valid repository name on 'https://hf.co'.") except RevisionNotFoundError as e: logger.error(e) raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for this " f"model name. Check the model page at 'https://huggingface.co/{path_or_repo}' for available revisions." ) except requests.HTTPError: # We return false for EntryNotFoundError (logical) as well as any connection error. return False
Checks if a repo contains a given file wihtout downloading it. Works for remote repos and local folders. <Tip warning={false}> This function will raise an error if the repository `path_or_repo` is not valid or if `revision` does not exist for this repo, but will return False for regular connection errors. </Tip>
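Illustrative calls to `has_file`, mirroring the repository used in the docstring examples above.

```python
has_file("bert-base-uncased", "pytorch_model.bin")   # True
has_file("bert-base-uncased", "does_not_exist.bin")  # False (entry-not-found is swallowed)
```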
11,537
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None): if token is None: token = HfFolder.get_token() if organization is None: username = whoami(token)["name"] return f"{username}/{model_id}" else: return f"{organization}/{model_id}"
null
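Illustrative behavior of `get_full_repo_name`; "my-user" stands in for whatever username the stored Hub token resolves to.

```python
get_full_repo_name("my-model", organization="my-org")  # -> "my-org/my-model"
get_full_repo_name("my-model")                         # -> "my-user/my-model" (looked up via whoami)
```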
11,538
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) def is_offline_mode(): return _is_offline_mode if ( os.path.isdir(old_default_cache_path) and not os.path.isdir(default_cache_path) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( "In Transformers v4.0.0, the default path to cache downloaded models changed from" " '~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have" " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" " '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should" " only see this message once." ) shutil.move(old_default_cache_path, default_cache_path) if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: warnings.warn( "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " "Transformers v5. Use `HF_ENDPOINT` instead.", FutureWarning, ) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) HUGGINGFACE_CO_EXAMPLES_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/examples" def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str: """ Formats a user-agent string with basic info about a request. """ ua = f"transformers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}" if is_torch_available(): ua += f"; torch/{_torch_version}" if is_tf_available(): ua += f"; tensorflow/{_tf_version}" if DISABLE_TELEMETRY: return ua + "; telemetry/off" if is_training_run_on_sagemaker(): ua += "; " + "; ".join(f"{k}/{v}" for k, v in define_sagemaker_information().items()) # CI will set this value to True if os.environ.get("TRANSFORMERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(user_agent, dict): ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent return ua if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: cache_version = int(f.read()) The provided code snippet includes necessary dependencies for implementing the `send_example_telemetry` function. Write a Python function `def send_example_telemetry(example_name, *example_args, framework="pytorch")` to solve the following problem: Sends telemetry that helps tracking the examples use. Args: example_name (`str`): The name of the example. 
*example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only try to extract the model and dataset name from those. Nothing else is tracked. framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example. Here is the function: def send_example_telemetry(example_name, *example_args, framework="pytorch"): """ Sends telemetry that helps tracking the examples use. Args: example_name (`str`): The name of the example. *example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only try to extract the model and dataset name from those. Nothing else is tracked. framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example. """ if is_offline_mode(): return data = {"example": example_name, "framework": framework} for args in example_args: args_as_dict = {k: v for k, v in args.__dict__.items() if not k.startswith("_") and v is not None} if "model_name_or_path" in args_as_dict: model_name = args_as_dict["model_name_or_path"] # Filter out local paths if not os.path.isdir(model_name): data["model_name"] = args_as_dict["model_name_or_path"] if "dataset_name" in args_as_dict: data["dataset_name"] = args_as_dict["dataset_name"] elif "task_name" in args_as_dict: # Extract script name from the example_name script_name = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "") script_name = script_name.replace("_no_trainer", "") data["dataset_name"] = f"{script_name}-{args_as_dict['task_name']}" headers = {"user-agent": http_user_agent(data)} try: r = requests.head(HUGGINGFACE_CO_EXAMPLES_TELEMETRY, headers=headers) r.raise_for_status() except Exception: # We don't want to error in case of connection errors of any kind. pass
Sends telemetry that helps tracking the examples use. Args: example_name (`str`): The name of the example. *example_args (dataclasses or `argparse.ArgumentParser`): The arguments to the script. This function will only try to extract the model and dataset name from those. Nothing else is tracked. framework (`str`, *optional*, defaults to `"pytorch"`): The framework for the example.
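A sketch of the payload-building part of `send_example_telemetry`, restated without the HTTP request so it can be inspected offline; the argument values below are illustrative only.

```python
import os
from types import SimpleNamespace


def build_telemetry_payload(example_name, *example_args, framework="pytorch"):
    # Re-implements only the data-dictionary construction from send_example_telemetry
    # above; the real function additionally sends a HEAD request with this payload
    # encoded in the user-agent string.
    data = {"example": example_name, "framework": framework}
    for args in example_args:
        args_as_dict = {k: v for k, v in vars(args).items() if not k.startswith("_") and v is not None}
        if "model_name_or_path" in args_as_dict and not os.path.isdir(args_as_dict["model_name_or_path"]):
            data["model_name"] = args_as_dict["model_name_or_path"]
        if "dataset_name" in args_as_dict:
            data["dataset_name"] = args_as_dict["dataset_name"]
        elif "task_name" in args_as_dict:
            script = example_name.replace("tf_", "").replace("flax_", "").replace("run_", "").replace("_no_trainer", "")
            data["dataset_name"] = f"{script}-{args_as_dict['task_name']}"
    return data


args = SimpleNamespace(model_name_or_path="bert-base-uncased", task_name="mrpc")
print(build_telemetry_payload("run_glue", args))
# {'example': 'run_glue', 'framework': 'pytorch', 'model_name': 'bert-base-uncased', 'dataset_name': 'glue-mrpc'}
```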
11,539
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) if ( os.path.isdir(old_default_cache_path) and not os.path.isdir(default_cache_path) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( "In Transformers v4.0.0, the default path to cache downloaded models changed from" " '~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have" " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" " '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should" " only see this message once." ) shutil.move(old_default_cache_path, default_cache_path) HUGGINGFACE_CO_RESOLVE_ENDPOINT = _default_endpoint if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: warnings.warn( "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " "Transformers v5. Use `HF_ENDPOINT` instead.", FutureWarning, ) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HF_ENDPOINT", HUGGINGFACE_CO_RESOLVE_ENDPOINT) def cached_file( path_or_repo_id: Union[str, os.PathLike], filename: str, cache_dir: Optional[Union[str, os.PathLike]] = None, force_download: bool = False, resume_download: bool = False, proxies: Optional[Dict[str, str]] = None, use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, local_files_only: bool = False, subfolder: str = "", user_agent: Optional[Union[str, Dict[str, str]]] = None, _raise_exceptions_for_missing_entries: bool = True, _raise_exceptions_for_connection_errors: bool = True, _commit_hash: Optional[str] = None, ): """ Tries to locate a file in a local folder and repo, downloads and cache it if necessary. Args: path_or_repo_id (`str` or `os.PathLike`): This can be either: - a string, the *model id* of a model repo on huggingface.co. - a path to a *directory* potentially containing the file. filename (`str`): The name of the file to locate in `path_or_repo`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force to (re-)download the configuration files and override the cached versions if they exist. 
resume_download (`bool`, *optional*, defaults to `False`): Whether or not to delete incompletely received file. Attempts to resume the download if such a file exists. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. use_auth_token (`str` or *bool*, *optional*): The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. local_files_only (`bool`, *optional*, defaults to `False`): If `True`, will only try to load the tokenizer configuration from local files. subfolder (`str`, *optional*, defaults to `""`): In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can specify the folder name here. <Tip> Passing `use_auth_token=True` is required when you want to use a private model. </Tip> Returns: `Optional[str]`: Returns the resolved file (to the cache folder if downloaded from a repo). Examples: ```python # Download a model weight from the Hub and cache it. model_weights_file = cached_file("bert-base-uncased", "pytorch_model.bin") ```""" # Private arguments # _raise_exceptions_for_missing_entries: if False, do not raise an exception for missing entries but return # None. # _raise_exceptions_for_connection_errors: if False, do not raise an exception for connection errors but return # None. # _commit_hash: passed when we are chaining several calls to various files (e.g. when loading a tokenizer or # a pipeline). If files are cached for this commit hash, avoid calls to head and get from the cache. if is_offline_mode() and not local_files_only: logger.info("Offline mode: forcing local_files_only=True") local_files_only = True if subfolder is None: subfolder = "" path_or_repo_id = str(path_or_repo_id) full_filename = os.path.join(subfolder, filename) if os.path.isdir(path_or_repo_id): resolved_file = os.path.join(os.path.join(path_or_repo_id, subfolder), filename) if not os.path.isfile(resolved_file): if _raise_exceptions_for_missing_entries: raise EnvironmentError( f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files." ) else: return None return resolved_file if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if _commit_hash is not None: # If the file is cached under that commit hash, we return it directly. 
resolved_file = try_to_load_from_cache( path_or_repo_id, full_filename, cache_dir=cache_dir, revision=_commit_hash ) if resolved_file is not None: if resolved_file is not _CACHED_NO_EXIST: return resolved_file elif not _raise_exceptions_for_missing_entries: return None else: raise EnvironmentError(f"Could not locate {full_filename} inside {path_or_repo_id}.") user_agent = http_user_agent(user_agent) try: # Load from URL or cache if already cached resolved_file = hf_hub_download( path_or_repo_id, filename, subfolder=None if len(subfolder) == 0 else subfolder, revision=revision, cache_dir=cache_dir, user_agent=user_agent, force_download=force_download, proxies=proxies, resume_download=resume_download, use_auth_token=use_auth_token, local_files_only=local_files_only, ) except RepositoryNotFoundError: raise EnvironmentError( f"{path_or_repo_id} is not a local folder and is not a valid model identifier " "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to " "pass a token having permission to this repo with `use_auth_token` or log in with " "`huggingface-cli login` and pass `use_auth_token=True`." ) except RevisionNotFoundError: raise EnvironmentError( f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists " "for this model name. Check the model page at " f"'https://huggingface.co/{path_or_repo_id}' for available revisions." ) except LocalEntryNotFoundError: # We try to see if we have a cached version (not up to date): resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) if resolved_file is not None and resolved_file != _CACHED_NO_EXIST: return resolved_file if not _raise_exceptions_for_missing_entries or not _raise_exceptions_for_connection_errors: return None raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this file, couldn't find it in the" f" cached files and it looks like {path_or_repo_id} is not the path to a directory containing a file named" f" {full_filename}.\nCheckout your internet connection or see how to run the library in offline mode at" " 'https://huggingface.co/docs/transformers/installation#offline-mode'." ) except EntryNotFoundError: if not _raise_exceptions_for_missing_entries: return None if revision is None: revision = "main" raise EnvironmentError( f"{path_or_repo_id} does not appear to have a file named {full_filename}. Checkout " f"'https://huggingface.co/{path_or_repo_id}/{revision}' for available files." ) except HTTPError as err: # First we try to see if we have a cached version (not up to date): resolved_file = try_to_load_from_cache(path_or_repo_id, full_filename, cache_dir=cache_dir, revision=revision) if resolved_file is not None and resolved_file != _CACHED_NO_EXIST: return resolved_file if not _raise_exceptions_for_connection_errors: return None raise EnvironmentError(f"There was a specific connection error when trying to load {path_or_repo_id}:\n{err}") return resolved_file if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: cache_version = int(f.read()) The provided code snippet includes necessary dependencies for implementing the `get_checkpoint_shard_files` function. 
Write a Python function `def get_checkpoint_shard_files( pretrained_model_name_or_path, index_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, local_files_only=False, use_auth_token=None, user_agent=None, revision=None, subfolder="", _commit_hash=None, )` to solve the following problem: For a given model: - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the Hub - returns the list of paths to all the shards, as well as some metadata. For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). Here is the function: def get_checkpoint_shard_files( pretrained_model_name_or_path, index_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, local_files_only=False, use_auth_token=None, user_agent=None, revision=None, subfolder="", _commit_hash=None, ): """ For a given model: - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the Hub - returns the list of paths to all the shards, as well as some metadata. For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub). """ import json if not os.path.isfile(index_filename): raise ValueError(f"Can't find a checkpoint index ({index_filename}) in {pretrained_model_name_or_path}.") with open(index_filename, "r") as f: index = json.loads(f.read()) shard_filenames = sorted(list(set(index["weight_map"].values()))) sharded_metadata = index["metadata"] sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys()) # First, let's deal with local folder. if os.path.isdir(pretrained_model_name_or_path): shard_filenames = [os.path.join(pretrained_model_name_or_path, subfolder, f) for f in shard_filenames] return shard_filenames, sharded_metadata # At this stage pretrained_model_name_or_path is a model identifier on the Hub cached_filenames = [] for shard_filename in shard_filenames: try: # Load from URL cached_filename = cached_file( pretrained_model_name_or_path, shard_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, revision=revision, subfolder=subfolder, _commit_hash=_commit_hash, ) # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so # we don't have to catch them here. except EntryNotFoundError: raise EnvironmentError( f"{pretrained_model_name_or_path} does not appear to have a file named {shard_filename} which is " "required according to the checkpoint index." ) except HTTPError: raise EnvironmentError( f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {shard_filename}. You should try" " again after checking your internet connection." ) cached_filenames.append(cached_filename) return cached_filenames, sharded_metadata
For a given model: - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the Hub - returns the list of paths to all the shards, as well as some metadata. For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub).
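A self-contained sketch of the local-folder branch of `get_checkpoint_shard_files`: it builds a toy shard index, then resolves the shard paths exactly as the helper does for a local directory. The index contents are made up for illustration.

```python
import json
import os
import tempfile

# Toy sharded-checkpoint index with two shards.
index = {
    "metadata": {"total_size": 123},
    "weight_map": {
        "embed.weight": "pytorch_model-00001-of-00002.bin",
        "lm_head.weight": "pytorch_model-00002-of-00002.bin",
    },
}

with tempfile.TemporaryDirectory() as repo_dir:
    index_file = os.path.join(repo_dir, "pytorch_model.bin.index.json")
    with open(index_file, "w") as f:
        json.dump(index, f)

    # Same steps as the helper: read the index, dedupe shard names, join paths.
    with open(index_file) as f:
        loaded = json.load(f)
    shard_filenames = sorted(set(loaded["weight_map"].values()))
    shard_paths = [os.path.join(repo_dir, name) for name in shard_filenames]

    print(shard_paths)          # two absolute paths inside the temporary folder
    print(loaded["metadata"])   # {'total_size': 123}
```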
11,540
import json import os import re import shutil import sys import tempfile import traceback import warnings from pathlib import Path from typing import Dict, List, Optional, Tuple, Union from urllib.parse import urlparse from uuid import uuid4 import huggingface_hub import requests from huggingface_hub import ( CommitOperationAdd, HfFolder, create_commit, create_repo, get_hf_file_metadata, hf_hub_download, hf_hub_url, whoami, ) from huggingface_hub.file_download import REGEX_COMMIT_HASH, http_get from huggingface_hub.utils import ( EntryNotFoundError, LocalEntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, hf_raise_for_status, ) from requests.exceptions import HTTPError from transformers.utils.logging import tqdm from . import __version__, logging from .generic import working_or_temp_dir from .import_utils import ( ENV_VARS_TRUE_VALUES, _tf_version, _torch_version, is_tf_available, is_torch_available, is_training_run_on_sagemaker, ) if ( os.path.isdir(old_default_cache_path) and not os.path.isdir(default_cache_path) and "PYTORCH_PRETRAINED_BERT_CACHE" not in os.environ and "PYTORCH_TRANSFORMERS_CACHE" not in os.environ and "TRANSFORMERS_CACHE" not in os.environ ): logger.warning( "In Transformers v4.0.0, the default path to cache downloaded models changed from" " '~/.cache/torch/transformers' to '~/.cache/huggingface/transformers'. Since you don't seem to have" " overridden and '~/.cache/torch/transformers' is a directory that exists, we're moving it to" " '~/.cache/huggingface/transformers' to avoid redownloading models you have already in the cache. You should" " only see this message once." ) shutil.move(old_default_cache_path, default_cache_path) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", HUGGINGFACE_HUB_CACHE) if os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) is not None: warnings.warn( "Using the environment variable `HUGGINGFACE_CO_RESOLVE_ENDPOINT` is deprecated and will be removed in " "Transformers v5. Use `HF_ENDPOINT` instead.", FutureWarning, ) HUGGINGFACE_CO_RESOLVE_ENDPOINT = os.environ.get("HUGGINGFACE_CO_RESOLVE_ENDPOINT", None) def get_all_cached_files(cache_dir=None): """ Returns a list for all files cached with appropriate metadata. """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE else: cache_dir = str(cache_dir) if not os.path.isdir(cache_dir): return [] cached_files = [] for file in os.listdir(cache_dir): meta_path = os.path.join(cache_dir, f"{file}.json") if not os.path.isfile(meta_path): continue with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"].replace('"', "") cached_files.append({"file": file, "url": url, "etag": etag}) return cached_files def extract_info_from_url(url): """ Extract repo_name, revision and filename from an url. """ search = re.search(r"^https://huggingface\.co/(.*)/resolve/([^/]*)/(.*)$", url) if search is None: return None repo, revision, filename = search.groups() cache_repo = "--".join(["models"] + repo.split("/")) return {"repo": cache_repo, "revision": revision, "filename": filename} def clean_files_for(file): """ Remove, if they exist, file, file.json and file.lock """ for f in [file, f"{file}.json", f"{file}.lock"]: if os.path.isfile(f): os.remove(f) def move_to_new_cache(file, repo, filename, revision, etag, commit_hash): """ Move file to repo following the new huggingface hub cache organization. 
""" os.makedirs(repo, exist_ok=True) # refs os.makedirs(os.path.join(repo, "refs"), exist_ok=True) if revision != commit_hash: ref_path = os.path.join(repo, "refs", revision) with open(ref_path, "w") as f: f.write(commit_hash) # blobs os.makedirs(os.path.join(repo, "blobs"), exist_ok=True) blob_path = os.path.join(repo, "blobs", etag) shutil.move(file, blob_path) # snapshots os.makedirs(os.path.join(repo, "snapshots"), exist_ok=True) os.makedirs(os.path.join(repo, "snapshots", commit_hash), exist_ok=True) pointer_path = os.path.join(repo, "snapshots", commit_hash, filename) huggingface_hub.file_download._create_relative_symlink(blob_path, pointer_path) clean_files_for(file) if not os.path.isfile(cache_version_file): cache_version = 0 else: with open(cache_version_file) as f: cache_version = int(f.read()) def move_cache(cache_dir=None, new_cache_dir=None, token=None): if new_cache_dir is None: new_cache_dir = TRANSFORMERS_CACHE if cache_dir is None: # Migrate from old cache in .cache/huggingface/hub old_cache = Path(TRANSFORMERS_CACHE).parent / "transformers" if os.path.isdir(str(old_cache)): cache_dir = str(old_cache) else: cache_dir = new_cache_dir if token is None: token = HfFolder.get_token() cached_files = get_all_cached_files(cache_dir=cache_dir) print(f"Moving {len(cached_files)} files to the new cache system") hub_metadata = {} for file_info in tqdm(cached_files): url = file_info.pop("url") if url not in hub_metadata: try: hub_metadata[url] = get_hf_file_metadata(url, use_auth_token=token) except requests.HTTPError: continue etag, commit_hash = hub_metadata[url].etag, hub_metadata[url].commit_hash if etag is None or commit_hash is None: continue if file_info["etag"] != etag: # Cached file is not up to date, we just throw it as a new version will be downloaded anyway. clean_files_for(os.path.join(cache_dir, file_info["file"])) continue url_info = extract_info_from_url(url) if url_info is None: # Not a file from huggingface.co continue repo = os.path.join(new_cache_dir, url_info["repo"]) move_to_new_cache( file=os.path.join(cache_dir, file_info["file"]), repo=repo, filename=url_info["filename"], revision=url_info["revision"], etag=etag, commit_hash=commit_hash, )
null
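A sketch of the on-disk layout that `move_to_new_cache` produces (`refs/`, `blobs/`, `snapshots/<commit>/<filename>`), built inside a temporary directory so it runs anywhere. The etag, commit hash, and repo name are placeholders, and a plain copy stands in for the symlink the real helper creates.

```python
import os
import shutil
import tempfile

with tempfile.TemporaryDirectory() as root:
    # An old-style cached file to migrate.
    old_file = os.path.join(root, "old_cache_entry")
    with open(old_file, "w") as f:
        f.write("weights")

    repo = os.path.join(root, "models--my-org--my-model")
    etag, commit, revision, filename = "abc123etag", "deadbeefcommit", "main", "config.json"

    # refs/<revision> points at the commit hash.
    os.makedirs(os.path.join(repo, "refs"), exist_ok=True)
    with open(os.path.join(repo, "refs", revision), "w") as f:
        f.write(commit)

    # blobs/<etag> holds the file content.
    os.makedirs(os.path.join(repo, "blobs"), exist_ok=True)
    shutil.move(old_file, os.path.join(repo, "blobs", etag))

    # snapshots/<commit>/<filename> points back at the blob (symlink in the real code).
    snapshot_dir = os.path.join(repo, "snapshots", commit)
    os.makedirs(snapshot_dir, exist_ok=True)
    shutil.copy(os.path.join(repo, "blobs", etag), os.path.join(snapshot_dir, filename))

    for dirpath, _, files in os.walk(repo):
        for name in files:
            print(os.path.relpath(os.path.join(dirpath, name), root))
```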
11,541
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length The provided code snippet includes necessary dependencies for implementing the `format_time` function. Write a Python function `def format_time(t)` to solve the following problem: Format `t` (in seconds) to (h):mm:ss Here is the function: def format_time(t): "Format `t` (in seconds) to (h):mm:ss" t = int(t) h, m, s = t // 3600, (t // 60) % 60, t % 60 return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
Format `t` (in seconds) to (h):mm:ss
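A minimal usage sketch of `format_time`, restated locally so it runs with nothing but the standard library; note that the hour field only appears when it is non-zero.

```python
def format_time(t):
    # Same arithmetic as the helper above: hours are dropped when zero.
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"


print(format_time(75))      # 01:15
print(format_time(3671.9))  # 1:01:11
```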
11,542
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def html_progress_bar(value, total, prefix, label, width=300): # docstyle-ignore return f""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """
null
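A small sketch showing what `html_progress_bar` emits; the function is restated locally and the prefix/label strings are illustrative. In a notebook the returned markup would typically be rendered via `IPython.display.HTML`.

```python
def html_progress_bar(value, total, prefix, label, width=300):
    # Same markup as the helper above: a native <progress> element plus a text label.
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """


html = html_progress_bar(30, 100, "Epoch 1/3", "30/100 00:12 < 00:28, 2.50 it/s")
print(html)  # paste into a notebook cell via IPython.display.HTML(html) to render it
```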
11,543
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length The provided code snippet includes necessary dependencies for implementing the `text_to_html_table` function. Write a Python function `def text_to_html_table(items)` to solve the following problem: Put the texts in `items` in an HTML table. Here is the function: def text_to_html_table(items): "Put the texts in `items` in an HTML table." html_code = """<table border="1" class="dataframe">\n""" html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f" <th>{i}</th>\n" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt) html_code += f" <td>{elt}</td>\n" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code
Put the texts in `items` in an HTML table.
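A runnable sketch of `text_to_html_table` with a small metrics table of made-up numbers: the first row becomes the header cells, and floats are rendered with six decimals while everything else goes through `str()`.

```python
def text_to_html_table(items):
    # Same logic as the helper above.
    html_code = """<table border="1" class="dataframe">\n"""
    html_code += """  <thead>\n <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code


items = [
    ["Epoch", "Training Loss", "Validation Loss"],
    [1, 0.523401, 0.498712],
    [2, 0.401119, 0.476003],
]
print(text_to_html_table(items))
```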
11,544
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_kenlm_available(): return importlib.util.find_spec("kenlm") is not None
null
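The `is_kenlm_available` helper above, like most of the `is_*_available` helpers that follow, relies on one pattern: `importlib.util.find_spec` returns `None` when a top-level package is not importable, without actually importing it. A generic restatement of that pattern, with placeholder package names:

```python
import importlib.util


def is_package_available(name: str) -> bool:
    # find_spec returns a ModuleSpec when the package can be located on the
    # import path and None otherwise; nothing gets imported as a side effect.
    return importlib.util.find_spec(name) is not None


print(is_package_available("json"))          # True: part of the standard library
print(is_package_available("not_a_module"))  # False: hypothetical missing package
```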
11,545
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _pyctcdecode_available = importlib.util.find_spec("pyctcdecode") is not None def is_pyctcdecode_available(): return _pyctcdecode_available
null
11,546
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _librosa_available = importlib.util.find_spec("librosa") is not None def is_librosa_available(): return _librosa_available
null
11,547
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_torch_available(): try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def is_torch_cuda_available(): if is_torch_available(): import torch return torch.cuda.is_available() else: return False
null
11,548
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging try: # Check we're not importing a "datasets" directory somewhere but the actual library by trying to grab the version # AND checking it has an author field in the metadata that is HuggingFace. _ = importlib_metadata.version("datasets") _datasets_metadata = importlib_metadata.metadata("datasets") if _datasets_metadata.get("author", "") != "HuggingFace Inc.": _datasets_available = False except importlib_metadata.PackageNotFoundError: _datasets_available = False def is_torch_available(): return _torch_available try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def is_torch_bf16_cpu_available(): if not is_torch_available(): return False import torch if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.10"): return False try: # multiple levels of AttributeError depending on the pytorch version so do them all in one check _ = torch.cpu.amp.autocast except AttributeError: return False return True
null
11,549
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_torch_bf16_gpu_available(): if not is_torch_available(): return False import torch # since currently no utility function is available we build our own. # some bits come from https://github.com/pytorch/pytorch/blob/2289a12f21c54da93bf5d696e3f9aea83dd9c10d/torch/testing/_internal/common_cuda.py#L51 # with additional check for torch version # to succeed: # 1. torch >= 1.10 (1.9 should be enough for AMP API has changed in 1.10, so using 1.10 as minimal) # 2. the hardware needs to support bf16 (GPU arch >= Ampere, or CPU) # 3. if using gpu, CUDA >= 11 # 4. torch.autocast exists # XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's # really only correct for the 0th gpu (or currently set default device if different from 0) if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.10"): return False if torch.cuda.is_available() and torch.version.cuda is not None: if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False if int(torch.version.cuda.split(".")[0]) < 11: return False if not hasattr(torch.cuda.amp, "autocast"): return False else: return False return True def is_torch_bf16_available(): # the original bf16 check was for gpu only, but later a cpu/bf16 combo has emerged so this util # has become ambiguous and therefore deprecated warnings.warn( "The util is_torch_bf16_available is deprecated, please use is_torch_bf16_gpu_available " "or is_torch_bf16_cpu_available instead according to whether it's used with cpu or gpu", FutureWarning, ) return is_torch_bf16_gpu_available()
null
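A pure-Python restatement of the version and capability gates used by the bf16 GPU check above, with the values passed in explicitly so it runs without a GPU: torch >= 1.10, CUDA >= 11, and an Ampere-or-newer device (compute capability major >= 8). The version strings below are illustrative.

```python
from packaging import version


def bf16_gpu_supported(torch_version: str, cuda_version: str, device_major: int) -> bool:
    # Same three conditions as is_torch_bf16_gpu_available, minus the live
    # torch.cuda queries, which are replaced by explicit arguments here.
    if version.parse(torch_version) < version.parse("1.10"):
        return False
    if int(cuda_version.split(".")[0]) < 11:
        return False
    return device_major >= 8


print(bf16_gpu_supported("1.13.1", "11.7", 8))  # True  (e.g. an Ampere-class card)
print(bf16_gpu_supported("1.9.0", "11.1", 8))   # False (torch too old)
print(bf16_gpu_supported("1.13.1", "10.2", 7))  # False (CUDA too old, pre-Ampere)
```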
11,550
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_torch_available(): return _torch_available try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) def is_torch_tf32_available(): if not is_torch_available(): return False import torch if not torch.cuda.is_available() or torch.version.cuda is None: return False if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8: return False if int(torch.version.cuda.split(".")[0]) < 11: return False if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.7"): return False return True
null
11,551
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_bs4_available(): return importlib.util.find_spec("bs4") is not None
null
11,552
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_torch_onnx_dict_inputs_support_available(): return _torch_onnx_dict_inputs_support_available
null
11,553
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging try: _coloredlogs_available = importlib_metadata.version("coloredlogs") logger.debug(f"Successfully imported coloredlogs version {_coloredlogs_available}") except importlib_metadata.PackageNotFoundError: _coloredlogs_available = False def is_coloredlogs_available(): return _coloredlogs_available
null
11,554
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _tf2onnx_available = importlib.util.find_spec("tf2onnx") is not None def is_tf2onnx_available(): return _tf2onnx_available
null
11,555
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _onnx_available = importlib.util.find_spec("onnxruntime") is not None def is_onnx_available(): return _onnx_available
null
11,556
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _ftfy_available = importlib.util.find_spec("ftfy") is not None def is_ftfy_available(): return _ftfy_available
null
11,557
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging try: # Check we're not importing a "datasets" directory somewhere but the actual library by trying to grab the version # AND checking it has an author field in the metadata that is HuggingFace. _ = importlib_metadata.version("datasets") _datasets_metadata = importlib_metadata.metadata("datasets") if _datasets_metadata.get("author", "") != "HuggingFace Inc.": _datasets_available = False except importlib_metadata.PackageNotFoundError: _datasets_available = False if _torch_available: torch_version = version.parse(importlib_metadata.version("torch")) _torch_fx_available = (torch_version.major, torch_version.minor) >= ( TORCH_FX_REQUIRED_VERSION.major, TORCH_FX_REQUIRED_VERSION.minor, ) _torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION The provided code snippet includes necessary dependencies for implementing the `is_torch_tpu_available` function. Write a Python function `def is_torch_tpu_available(check_device=True)` to solve the following problem: Checks if `torch_xla` is installed and potentially if a TPU is in the environment Here is the function: def is_torch_tpu_available(check_device=True): "Checks if `torch_xla` is installed and potentially if a TPU is in the environment" if not _torch_available: return False if importlib.util.find_spec("torch_xla") is not None: if check_device: # We need to check if `xla_device` can be found, will raise a RuntimeError if not try: import torch_xla.core.xla_model as xm _ = xm.xla_device() return True except RuntimeError: return False return True return False
Checks if `torch_xla` is installed and potentially if a TPU is in the environment
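A softer, self-contained variant of the TPU check above: it only verifies that `torch_xla` is installed, whereas the real helper optionally goes further and tries to create an XLA device.

```python
import importlib.util


def tpu_runtime_installed() -> bool:
    # Installation check only; is_torch_tpu_available(check_device=True) would
    # additionally call xm.xla_device() and catch the RuntimeError if no TPU exists.
    return importlib.util.find_spec("torch_xla") is not None


print("torch_xla installed:", tpu_runtime_installed())
```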
11,558
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_torchdynamo_available(): return importlib.util.find_spec("torchdynamo") is not None
null
11,559
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_torch_tensorrt_fx_available(): if importlib.util.find_spec("torch_tensorrt") is None: return False return importlib.util.find_spec("torch_tensorrt.fx") is not None
null
11,560
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _datasets_available = importlib.util.find_spec("datasets") is not None def is_datasets_available(): return _datasets_available
null
11,561
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _detectron2_available = importlib.util.find_spec("detectron2") is not None def is_detectron2_available(): return _detectron2_available
null
11,562
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_more_itertools_available(): return importlib.util.find_spec("more_itertools") is not None
null
11,563
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_rjieba_available(): return importlib.util.find_spec("rjieba") is not None
null
11,564
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_psutil_available(): return importlib.util.find_spec("psutil") is not None
null
11,565
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_py3nvml_available(): return importlib.util.find_spec("py3nvml") is not None
null
11,566
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_sacremoses_available(): return importlib.util.find_spec("sacremoses") is not None
null
11,567
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_apex_available(): return importlib.util.find_spec("apex") is not None
null
11,568
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_ninja_available(): return importlib.util.find_spec("ninja") is not None
null
11,569
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging logger = logging.get_logger(__name__) _torch_version = "N/A" def is_torch_available(): return _torch_available def is_ipex_available(): def get_major_and_minor_from_version(full_version): return str(version.parse(full_version).major) + "." + str(version.parse(full_version).minor) if not is_torch_available() or importlib.util.find_spec("intel_extension_for_pytorch") is None: return False _ipex_version = "N/A" try: _ipex_version = importlib_metadata.version("intel_extension_for_pytorch") except importlib_metadata.PackageNotFoundError: return False torch_major_and_minor = get_major_and_minor_from_version(_torch_version) ipex_major_and_minor = get_major_and_minor_from_version(_ipex_version) if torch_major_and_minor != ipex_major_and_minor: logger.warning( f"Intel Extension for PyTorch {ipex_major_and_minor} needs to work with PyTorch {ipex_major_and_minor}.*," f" but PyTorch {_torch_version} is found. Please switch to the matching version and run again." ) return False return True
null
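A sketch of the core comparison inside `is_ipex_available` above: the Intel extension is only accepted when its `major.minor` matches the installed PyTorch build. The version strings are made-up examples.

```python
from packaging import version


def versions_match_major_minor(torch_full: str, ipex_full: str) -> bool:
    # Same comparison as get_major_and_minor_from_version in the helper above:
    # only the major.minor components are compared, patch/local parts are ignored.
    def major_minor(v: str) -> str:
        parsed = version.parse(v)
        return f"{parsed.major}.{parsed.minor}"

    return major_minor(torch_full) == major_minor(ipex_full)


print(versions_match_major_minor("1.13.1+cpu", "1.13.100"))  # True
print(versions_match_major_minor("1.12.0", "1.13.0"))        # False
```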
11,570
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_bitsandbytes_available(): return importlib.util.find_spec("bitsandbytes") is not None
null
11,571
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _faiss_available = importlib.util.find_spec("faiss") is not None def is_faiss_available(): return _faiss_available
null
11,572
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_scipy_available(): return importlib.util.find_spec("scipy") is not None def is_sklearn_available(): if importlib.util.find_spec("sklearn") is None: return False return is_scipy_available() and importlib.util.find_spec("sklearn.metrics")
null
11,573
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_sentencepiece_available(): return importlib.util.find_spec("sentencepiece") is not None
null
11,574
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_protobuf_available(): if importlib.util.find_spec("google") is None: return False return importlib.util.find_spec("google.protobuf") is not None
null
11,575
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_accelerate_available(): return importlib.util.find_spec("accelerate") is not None
null
11,576
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_safetensors_available(): return importlib.util.find_spec("safetensors") is not None
null
11,577
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_tokenizers_available(): return importlib.util.find_spec("tokenizers") is not None
null
11,578
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_vision_available(): return importlib.util.find_spec("PIL") is not None
null
11,579
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_pytesseract_available(): return importlib.util.find_spec("pytesseract") is not None
null
11,580
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_spacy_available(): return importlib.util.find_spec("spacy") is not None
null
11,581
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_tensorflow_text_available(): return importlib.util.find_spec("tensorflow_text") is not None
null
11,582
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_in_notebook(): try: # Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py get_ipython = sys.modules["IPython"].get_ipython if "IPKernelApp" not in get_ipython().config: raise ImportError("console") if "VSCODE_PID" in os.environ: raise ImportError("vscode") if "DATABRICKS_RUNTIME_VERSION" in os.environ and os.environ["DATABRICKS_RUNTIME_VERSION"] < "11.0": # Databricks Runtime 11.0 and above uses IPython kernel by default so it should be compatible with Jupyter notebook # https://docs.microsoft.com/en-us/azure/databricks/notebooks/ipython-kernel raise ImportError("databricks") return importlib.util.find_spec("IPython") is not None except (AttributeError, ImportError, KeyError): return False
null
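A lightweight variant of the detection above that only reports which of the signals `is_in_notebook` looks at are present, without importing IPython or raising; the returned strings are illustrative labels, not part of the original helper.

```python
import os
import sys


def runtime_hint() -> str:
    # Loosely mirrors the branches of is_in_notebook: IPython in sys.modules,
    # the VS Code marker variable, and the Databricks runtime variable.
    if "IPython" not in sys.modules:
        return "plain Python process (IPython not imported)"
    if "VSCODE_PID" in os.environ:
        return "VS Code interactive window"
    if "DATABRICKS_RUNTIME_VERSION" in os.environ:
        return "Databricks runtime"
    return "possibly a Jupyter notebook"


print(runtime_hint())
```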
11,583
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _scatter_available = importlib.util.find_spec("torch_scatter") is not None def is_scatter_available(): return _scatter_available
null
11,584
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _pytorch_quantization_available = importlib.util.find_spec("pytorch_quantization") is not None def is_pytorch_quantization_available(): return _pytorch_quantization_available
null
11,585
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging _tensorflow_probability_available = importlib.util.find_spec("tensorflow_probability") is not None def is_tensorflow_probability_available(): return _tensorflow_probability_available
null
11,586
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_pandas_available(): return importlib.util.find_spec("pandas") is not None
null
11,587
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_sagemaker_dp_enabled(): # Get the sagemaker specific env variable. sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". sagemaker_params = json.loads(sagemaker_params) if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None
null
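A sketch of the environment-variable parsing that `is_sagemaker_dp_enabled` performs, with the variable set by hand so the path can be exercised outside a SageMaker job; the real helper additionally requires the `smdistributed` module to be installed.

```python
import json
import os

# Simulate the value SageMaker would inject for a data-parallel job.
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps(
    {"sagemaker_distributed_dataparallel_enabled": True}
)

# Same parsing as the helper above: read the variable, decode JSON, check the flag.
params = json.loads(os.getenv("SM_FRAMEWORK_PARAMS", "{}"))
flag = params.get("sagemaker_distributed_dataparallel_enabled", False)
print(flag)  # True
```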
11,588
import importlib.util import json import os import shutil import sys import warnings from collections import OrderedDict from functools import lru_cache, wraps from itertools import chain from types import ModuleType from typing import Any from packaging import version from transformers.utils.versions import importlib_metadata from . import logging def is_sagemaker_mp_enabled(): # Get the sagemaker specific mp parameters from smp_options variable. smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}") try: # Parse it and check the field "partitions" is included, it is required for model parallel. smp_options = json.loads(smp_options) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}") try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". mpi_options = json.loads(mpi_options) if not mpi_options.get("sagemaker_mpi_enabled", False): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec("smdistributed") is not None
null