Dataset schema (column, type, observed range):

| column                  | type   | range              |
|-------------------------|--------|--------------------|
| code                    | string | 82 to 54.1k chars  |
| code_codestyle          | int64  | 0 to 699           |
| style_context           | string | 111 to 35.6k chars |
| style_context_codestyle | int64  | 0 to 699           |
| label                   | int64  | 0 to 1             |
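A minimal sketch of loading and iterating rows with this schema via the `datasets` library; the repository id below is a placeholder, since the dump does not name the dataset.

```python
# A minimal sketch, assuming the rows below are hosted as a Hugging Face dataset.
# "user/code-style-pairs" is a hypothetical repository id, not the real one.
from datasets import load_dataset

ds = load_dataset("user/code-style-pairs", split="train")
for row in ds.select(range(3)):
    # Each row pairs a `code` string with a `style_context` string,
    # plus the style id of each side and a binary label.
    print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])
```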
code:
```python
'''simple docstring'''
from __future__ import annotations

import queue

class __lowerCAmelCase:
    '''simple docstring'''

    def __init__(self: Optional[int], _a: int):
        '''simple docstring'''
        A_: Optional[int] = data
        A_: Dict = None
        A_: List[str] = None

def lowerCamelCase():
    print("""\n********Press N to stop entering at any point of time********\n""")
    A_: List[Any] = input("""Enter the value of the root node: """).strip().lower()
    A_: queue.Queue = queue.Queue()
    A_: List[Any] = TreeNode(int(lowerCamelCase))
    q.put(lowerCamelCase)
    while not q.empty():
        A_: str = q.get()
        A_: List[Any] = F'Enter the left node of {node_found.data}: '
        A_: Tuple = input(lowerCamelCase).strip().lower() or """n"""
        if check == "n":
            return tree_node
        A_: Optional[Any] = TreeNode(int(lowerCamelCase))
        A_: Any = left_node
        q.put(lowerCamelCase)
        A_: List[Any] = F'Enter the right node of {node_found.data}: '
        A_: Optional[Any] = input(lowerCamelCase).strip().lower() or """n"""
        if check == "n":
            return tree_node
        A_: Dict = TreeNode(int(lowerCamelCase))
        A_: List[Any] = right_node
        q.put(lowerCamelCase)
    raise

def lowerCamelCase(lowerCamelCase: TreeNode):
    if not isinstance(lowerCamelCase, lowerCamelCase) or not node:
        return
    print(node.data, end=""",""")
    pre_order(node.left)
    pre_order(node.right)

def lowerCamelCase(lowerCamelCase: TreeNode):
    if not isinstance(lowerCamelCase, lowerCamelCase) or not node:
        return
    in_order(node.left)
    print(node.data, end=""",""")
    in_order(node.right)

def lowerCamelCase(lowerCamelCase: TreeNode):
    if not isinstance(lowerCamelCase, lowerCamelCase) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=""",""")

def lowerCamelCase(lowerCamelCase: TreeNode):
    if not isinstance(lowerCamelCase, lowerCamelCase) or not node:
        return
    A_: queue.Queue = queue.Queue()
    q.put(lowerCamelCase)
    while not q.empty():
        A_: List[Any] = q.get()
        print(node_dequeued.data, end=""",""")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)

def lowerCamelCase(lowerCamelCase: TreeNode):
    if not isinstance(lowerCamelCase, lowerCamelCase) or not node:
        return
    A_: queue.Queue = queue.Queue()
    q.put(lowerCamelCase)
    while not q.empty():
        A_: Dict = []
        while not q.empty():
            A_: Any = q.get()
            print(node_dequeued.data, end=""",""")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(lowerCamelCase)

def lowerCamelCase(lowerCamelCase: TreeNode):
    if not isinstance(lowerCamelCase, lowerCamelCase) or not node:
        return
    A_: list[TreeNode] = []
    A_: int = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=""",""")
            stack.append(lowerCamelCase)
            A_: Any = n.left
        # end of while means current node doesn't have left child
        A_: Union[str, Any] = stack.pop()
        # start to traverse its right child
        A_: Any = n.right

def lowerCamelCase(lowerCamelCase: TreeNode):
    if not isinstance(lowerCamelCase, lowerCamelCase) or not node:
        return
    A_: list[TreeNode] = []
    A_: List[str] = node
    while n or stack:
        while n:
            stack.append(lowerCamelCase)
            A_: Optional[Any] = n.left
        A_: List[Any] = stack.pop()
        print(n.data, end=""",""")
        A_: List[Any] = n.right

def lowerCamelCase(lowerCamelCase: TreeNode):
    if not isinstance(lowerCamelCase, lowerCamelCase) or not node:
        return
    A_, A_: Optional[Any] = [], []
    A_: int = node
    stacka.append(lowerCamelCase)
    while stacka:  # to find the reversed order of post order, store it in stack2
        A_: List[Any] = stacka.pop()
        if n.left:
            stacka.append(n.left)
        if n.right:
            stacka.append(n.right)
        stacka.append(lowerCamelCase)
    while stacka:  # pop up from stack2 will be the post order
        print(stacka.pop().data, end=""",""")

def lowerCamelCase(lowerCamelCase: str = "", lowerCamelCase: List[str] = 50, lowerCamelCase: Union[str, Any] = "*"):
    if not s:
        return "\n" + width * char
    A_, A_: Optional[int] = divmod(width - len(lowerCamelCase) - 2, 2)
    return F'{left * char} {s} {(left + extra) * char}'

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(prompt('Binary Tree Traversals'))
    __magic_name__ = build_tree()
    print(prompt('Pre Order Traversal'))
    pre_order(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal'))
    in_order(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal'))
    post_order(node)
    print(prompt() + '\n')
    print(prompt('Level Order Traversal'))
    level_order(node)
    print(prompt() + '\n')
    print(prompt('Actual Level Order Traversal'))
    level_order_actual(node)
    print('*' * 50 + '\n')
    print(prompt('Pre Order Traversal - Iteration Version'))
    pre_order_iter(node)
    print(prompt() + '\n')
    print(prompt('In Order Traversal - Iteration Version'))
    in_order_iter(node)
    print(prompt() + '\n')
    print(prompt('Post Order Traversal - Iteration Version'))
    post_order_iter(node)
    print(prompt())
```
code_codestyle: 665
style_context:
```python
'''simple docstring'''
import argparse

from transformers import (
    TapasConfig,
    TapasForMaskedLM,
    TapasForQuestionAnswering,
    TapasForSequenceClassification,
    TapasModel,
    TapasTokenizer,
    load_tf_weights_in_tapas,
)
from transformers.utils import logging

logging.set_verbosity_info()

def lowerCamelCase(lowerCamelCase: Optional[Any], lowerCamelCase: Any, lowerCamelCase: Union[str, Any], lowerCamelCase: Tuple, lowerCamelCase: str):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    A_: int = TapasConfig.from_json_file(lowerCamelCase)
    # set absolute/relative position embeddings parameter
    A_: List[Any] = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        A_: Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase)
    elif task == "WTQ":
        # run_task_main.py hparams
        A_: Tuple = 4
        A_: Optional[Any] = True
        # hparam_utils.py hparams
        A_: Any = 0.66_4694
        A_: str = 0.20_7951
        A_: Any = 0.12_1194
        A_: str = True
        A_: Dict = True
        A_: int = False
        A_: int = 0.035_2513
        A_: Tuple = TapasForQuestionAnswering(config=lowerCamelCase)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        A_: int = 4
        A_: Union[str, Any] = False
        # hparam_utils.py hparams
        A_: Dict = 36.4519
        A_: List[Any] = 0.90_3421
        A_: Any = 222.088
        A_: Optional[Any] = True
        A_: Optional[int] = True
        A_: Optional[Any] = True
        A_: Optional[int] = 0.76_3141
        A_: Any = TapasForQuestionAnswering(config=lowerCamelCase)
    elif task == "TABFACT":
        A_: Any = TapasForSequenceClassification(config=lowerCamelCase)
    elif task == "MLM":
        A_: List[Any] = TapasForMaskedLM(config=lowerCamelCase)
    elif task == "INTERMEDIATE_PRETRAINING":
        A_: Union[str, Any] = TapasModel(config=lowerCamelCase)
    else:
        raise ValueError(F'Task {task} not supported.')

    print(F'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(lowerCamelCase, lowerCamelCase, lowerCamelCase)

    # Save pytorch-model (weights and configuration)
    print(F'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(lowerCamelCase)

    # Save tokenizer files
    print(F'Save tokenizer files to {pytorch_dump_path}')
    A_: Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""", model_max_length=512)
    tokenizer.save_pretrained(lowerCamelCase)

    print("""Used relative position embeddings:""", model.config.reset_position_index_per_cell)

if __name__ == "__main__":
    __magic_name__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
    )
    parser.add_argument(
        '--reset_position_index_per_cell',
        default=False,
        action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to True.',
    )
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--tapas_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained TAPAS model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    __magic_name__ = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
```
style_context_codestyle: 665
label: 1
code:
```python
'''simple docstring'''

def lowerCamelCase(lowerCamelCase: Optional[int]):
    A_: int = 0
    A_: List[str] = len(lowerCamelCase)
    for i in range(n - 1):
        for j in range(i + 1, lowerCamelCase):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions

def lowerCamelCase(lowerCamelCase: Optional[int]):
    if len(lowerCamelCase) <= 1:
        return arr, 0
    A_: int = len(lowerCamelCase) // 2
    A_: Optional[int] = arr[0:mid]
    A_: List[Any] = arr[mid:]
    A_, A_: Tuple = count_inversions_recursive(lowerCamelCase)
    A_, A_: List[Any] = count_inversions_recursive(lowerCamelCase)
    A_, A_: Optional[int] = _count_cross_inversions(lowerCamelCase, lowerCamelCase)
    A_: List[Any] = inversion_p + inversions_q + cross_inversions
    return c, num_inversions

def lowerCamelCase(lowerCamelCase: Any, lowerCamelCase: Optional[Any]):
    A_: int = []
    A_: int = 0
    while i < len(lowerCamelCase) and j < len(lowerCamelCase):
        if p[i] > q[j]:
            # if P[1] > Q[j], then P[k] > Q[k] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(lowerCamelCase) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    if i < len(lowerCamelCase):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion

def lowerCamelCase():
    A_: str = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    A_: Optional[int] = count_inversions_bf(lowerCamelCase)
    A_, A_: Any = count_inversions_recursive(lowerCamelCase)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("""number of inversions = """, lowerCamelCase)

    # testing an array with zero inversion (a sorted arr_1)
    arr_a.sort()
    A_: Dict = count_inversions_bf(lowerCamelCase)
    A_, A_: Any = count_inversions_recursive(lowerCamelCase)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("""number of inversions = """, lowerCamelCase)

    # an empty list should also have zero inversions
    A_: int = []
    A_: Tuple = count_inversions_bf(lowerCamelCase)
    A_, A_: Optional[Any] = count_inversions_recursive(lowerCamelCase)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("""number of inversions = """, lowerCamelCase)

if __name__ == "__main__":
    main()
```
code_codestyle: 665
style_context:
```python
'''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel

class __lowerCAmelCase(__SCREAMING_SNAKE_CASE):
    '''simple docstring'''

    a_ = ["""vqvae"""]

    def __init__(self: Optional[Any], _a: AutoencoderKL, _a: UNetaDConditionModel, _a: Mel,
                 _a: Union[DDIMScheduler, DDPMScheduler]):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=_a, scheduler=_a, mel=_a, vqvae=_a)

    def _a(self: str):
        '''simple docstring'''
        return 50 if isinstance(self.scheduler, _a) else 1000

    @torch.no_grad()
    def __call__(self: Optional[int], _a: int = 1, _a: str = None, _a: np.ndarray = None, _a: int = 0,
                 _a: int = 0, _a: int = None, _a: torch.Generator = None, _a: float = 0, _a: float = 0,
                 _a: torch.Generator = None, _a: float = 0, _a: torch.Tensor = None, _a: torch.Tensor = None,
                 _a: int = True):
        '''simple docstring'''
        A_: List[str] = steps or self.get_default_steps()
        self.scheduler.set_timesteps(_a)
        A_: Union[str, Any] = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            A_: Tuple = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            A_: int = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=_a,
                device=self.device,
            )
        A_: List[Any] = noise
        A_: str = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(_a, _a)
            A_: Any = self.mel.audio_slice_to_image(_a)
            A_: Union[str, Any] = np.frombuffer(input_image.tobytes(), dtype="""uint8""").reshape(
                (input_image.height, input_image.width)
            )
            A_: Optional[Any] = (input_image / 255) * 2 - 1
            A_: Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                A_: Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a, 0)).latent_dist.sample(
                    generator=_a
                )[0]
                A_: List[str] = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                A_: Any = self.scheduler.add_noise(_a, _a, self.scheduler.timesteps[start_step - 1])
            A_: Tuple = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            A_: Tuple = int(mask_start_secs * pixels_per_second)
            A_: str = int(mask_end_secs * pixels_per_second)
            A_: int = self.scheduler.add_noise(_a, _a, torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, _a):
                A_: Optional[Any] = self.unet(_a, _a, _a)["""sample"""]
            else:
                A_: List[Any] = self.unet(_a, _a)["""sample"""]
            if isinstance(self.scheduler, _a):
                A_: Dict = self.scheduler.step(
                    model_output=_a, timestep=_a, sample=_a, eta=_a, generator=_a,
                )["""prev_sample"""]
            else:
                A_: Any = self.scheduler.step(
                    model_output=_a, timestep=_a, sample=_a, generator=_a,
                )["""prev_sample"""]
            if mask is not None:
                if mask_start > 0:
                    A_: Tuple = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    A_: List[str] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            A_: str = 1 / self.vqvae.config.scaling_factor * images
            A_: Union[str, Any] = self.vqvae.decode(_a)["""sample"""]
        A_: int = (images / 2 + 0.5).clamp(0, 1)
        A_: str = images.cpu().permute(0, 2, 3, 1).numpy()
        A_: Optional[int] = (images * 255).round().astype("""uint8""")
        A_: List[Any] = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_a, mode="""RGB""").convert("""L""") for _ in images)
        )
        A_: Tuple = [self.mel.image_to_audio(_a) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(_a)[:, np.newaxis, :]), **ImagePipelineOutput(_a))

    @torch.no_grad()
    def _a(self: Union[str, Any], _a: List[Image.Image], _a: int = 50):
        '''simple docstring'''
        assert isinstance(self.scheduler, _a)
        self.scheduler.set_timesteps(_a)
        A_: Optional[Any] = np.array(
            [np.frombuffer(image.tobytes(), dtype="""uint8""").reshape((1, image.height, image.width)) for image in images]
        )
        A_: List[str] = (sample / 255) * 2 - 1
        A_: Optional[int] = torch.Tensor(_a).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            A_: List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            A_: Any = self.scheduler.alphas_cumprod[t]
            A_: List[Any] = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            A_: str = 1 - alpha_prod_t
            A_: List[str] = self.unet(_a, _a)["""sample"""]
            A_: str = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            A_: Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            A_: Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample

    @staticmethod
    def _a(_a: torch.Tensor, _a: torch.Tensor, _a: float):
        '''simple docstring'''
        A_: List[Any] = acos(torch.dot(torch.flatten(_a), torch.flatten(_a)) / torch.norm(_a) / torch.norm(_a))
        return sin((1 - alpha) * theta) * xa / sin(_a) + sin(alpha * theta) * xa / sin(_a)
```
style_context_codestyle: 665
label: 1
code:
```python
'''simple docstring'''
import inspect
import unittest

from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
    from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import DPTImageProcessor

class __lowerCAmelCase:
    '''simple docstring'''

    def __init__(self: Optional[int], _a: List[str], _a: Any = 2, _a: Dict = 32, _a: Optional[Any] = 16,
                 _a: Optional[Any] = 3, _a: str = True, _a: Tuple = True, _a: Tuple = 32, _a: int = 4,
                 _a: str = [0, 1, 2, 3], _a: Union[str, Any] = 4, _a: str = 37, _a: int = "gelu",
                 _a: Tuple = 0.1, _a: Optional[Any] = 0.1, _a: List[Any] = 0.02, _a: Union[str, Any] = 3,
                 _a: Tuple = [1, 384, 24, 24], _a: str = True, _a: int = None):
        '''simple docstring'''
        A_: List[str] = parent
        A_: Tuple = batch_size
        A_: Union[str, Any] = image_size
        A_: str = patch_size
        A_: Optional[int] = num_channels
        A_: int = is_training
        A_: Tuple = use_labels
        A_: str = hidden_size
        A_: List[Any] = num_hidden_layers
        A_: List[Any] = backbone_out_indices
        A_: Optional[int] = num_attention_heads
        A_: Any = intermediate_size
        A_: Optional[int] = hidden_act
        A_: Any = hidden_dropout_prob
        A_: Union[str, Any] = attention_probs_dropout_prob
        A_: List[Any] = initializer_range
        A_: Optional[Any] = num_labels
        A_: Optional[int] = backbone_featmap_shape
        A_: Dict = scope
        A_: Dict = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        A_: Optional[int] = (image_size // patch_size) ** 2
        A_: Any = num_patches + 1

    def _a(self: Tuple):
        '''simple docstring'''
        A_: Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        A_: Optional[int] = None
        if self.use_labels:
            A_: Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        A_: List[str] = self.get_config()
        return config, pixel_values, labels

    def _a(self: Optional[Any]):
        '''simple docstring'''
        A_: Optional[Any] = {
            """global_padding""": """same""",
            """layer_type""": """bottleneck""",
            """depths""": [3, 4, 9],
            """out_features""": ["""stage1""", """stage2""", """stage3"""],
            """embedding_dynamic_padding""": True,
            """hidden_sizes""": [96, 192, 384, 768],
            """num_groups""": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=_a,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=_a,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def _a(self: Union[str, Any], _a: Dict, _a: Optional[Any], _a: int):
        '''simple docstring'''
        A_: List[str] = DPTModel(config=_a)
        model.to(_a)
        model.eval()
        A_: List[Any] = model(_a)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def _a(self: Dict, _a: List[str], _a: int, _a: Optional[int]):
        '''simple docstring'''
        A_: Optional[int] = self.num_labels
        A_: Any = DPTForDepthEstimation(_a)
        model.to(_a)
        model.eval()
        A_: str = model(_a)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def _a(self: Dict, _a: Optional[Any], _a: str, _a: Union[str, Any]):
        '''simple docstring'''
        A_: Optional[int] = self.num_labels
        A_: Optional[int] = DPTForSemanticSegmentation(_a)
        model.to(_a)
        model.eval()
        A_: Tuple = model(_a, labels=_a)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def _a(self: List[str]):
        '''simple docstring'''
        A_: Dict = self.prepare_config_and_inputs()
        A_, A_, A_: Any = config_and_inputs
        A_: Optional[int] = {"""pixel_values""": pixel_values}
        return config, inputs_dict

@require_torch
class __lowerCAmelCase(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, unittest.TestCase):
    '''simple docstring'''

    a_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    a_ = (
        {
            """depth-estimation""": DPTForDepthEstimation,
            """feature-extraction""": DPTModel,
            """image-segmentation""": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a_ = False
    a_ = False
    a_ = False

    def _a(self: Union[str, Any]):
        '''simple docstring'''
        A_: List[str] = DPTModelTester(self)
        A_: Any = ConfigTester(self, config_class=_a, has_text_modality=_a, hidden_size=37)

    def _a(self: Optional[int]):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""DPT does not use inputs_embeds""")
    def _a(self: List[Any]):
        '''simple docstring'''
        pass

    def _a(self: Dict):
        '''simple docstring'''
        A_, A_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_: Union[str, Any] = model_class(_a)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            A_: List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(_a, nn.Linear))

    def _a(self: Tuple):
        '''simple docstring'''
        A_, A_: Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A_: Tuple = model_class(_a)
            A_: Union[str, Any] = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A_: Tuple = [*signature.parameters.keys()]
            A_: int = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1], _a)

    def _a(self: Union[str, Any]):
        '''simple docstring'''
        A_: List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a)

    def _a(self: int):
        '''simple docstring'''
        A_: List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*_a)

    def _a(self: Tuple):
        '''simple docstring'''
        A_: Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_a)

    def _a(self: int):
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            A_, A_: Any = self.model_tester.prepare_config_and_inputs_for_common()
            A_: List[str] = True
            if model_class in get_values(_a):
                continue
            A_: Any = model_class(_a)
            model.to(_a)
            model.train()
            A_: Optional[int] = self._prepare_for_class(_a, _a, return_labels=_a)
            A_: List[str] = model(**_a).loss
            loss.backward()

    def _a(self: Any):
        '''simple docstring'''
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue
            A_, A_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
            A_: str = False
            A_: Any = True
            if model_class in get_values(_a) or not model_class.supports_gradient_checkpointing:
                continue
            A_: Tuple = model_class(_a)
            model.to(_a)
            model.gradient_checkpointing_enable()
            model.train()
            A_: Tuple = self._prepare_for_class(_a, _a, return_labels=_a)
            A_: Any = model(**_a).loss
            loss.backward()

    def _a(self: List[Any]):
        '''simple docstring'''
        A_, A_: str = self.model_tester.prepare_config_and_inputs_for_common()
        A_: List[Any] = _config_zero_init(_a)
        for model_class in self.all_model_classes:
            A_: Optional[Any] = model_class(config=_a)
            # Skip the check for the backbone
            A_: Optional[int] = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    A_: Dict = [f'{name}.{key}' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
    def _a(self: str):
        '''simple docstring'''
        pass

    @slow
    def _a(self: Union[str, Any]):
        '''simple docstring'''
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            A_: List[str] = DPTModel.from_pretrained(_a)
            self.assertIsNotNone(_a)

    def _a(self: Optional[Any]):
        '''simple docstring'''
        A_, A_: Any = self.model_tester.prepare_config_and_inputs_for_common()
        A_: Any = """add"""
        with self.assertRaises(_a):
            A_: Optional[int] = DPTForDepthEstimation(_a)

def lowerCamelCase():
    A_: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image

@require_torch
@require_vision
@slow
class __lowerCAmelCase(unittest.TestCase):
    '''simple docstring'''

    def _a(self: str):
        '''simple docstring'''
        A_: List[str] = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""")
        A_: int = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""").to(_a)
        A_: List[Any] = prepare_img()
        A_: int = image_processor(images=_a, return_tensors="""pt""").to(_a)
        # forward pass
        with torch.no_grad():
            A_: Tuple = model(**_a)
        A_: Union[str, Any] = outputs.predicted_depth
        # verify the predicted depth
        A_: int = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, _a)
        A_: Dict = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(_a)
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, _a, atol=1e-4))
```
code_codestyle: 665
style_context:
```python
'''simple docstring'''
import argparse

import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType

########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
#   - single CPU or single GPU
#   - multi GPUS (using PyTorch distributed mode)
#   - (multi) TPUs
#   - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################

__magic_name__ = 16
__magic_name__ = 32

def lowerCamelCase(lowerCamelCase: Accelerator, lowerCamelCase: int = 16):
    A_: Any = AutoTokenizer.from_pretrained("""bert-base-cased""")
    A_: str = load_dataset("""glue""", """mrpc""")

    def tokenize_function(lowerCamelCase: Dict):
        # max_length=None => use the model max length (it's actually the default)
        A_: List[str] = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=lowerCamelCase, max_length=lowerCamelCase)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        A_: Tuple = datasets.map(
            lowerCamelCase,
            batched=lowerCamelCase,
            remove_columns=["""idx""", """sentence1""", """sentence2"""],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    A_: List[str] = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(lowerCamelCase: Tuple):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        A_: str = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            A_: List[Any] = 16
        elif accelerator.mixed_precision != "no":
            A_: Any = 8
        else:
            A_: Tuple = None
        return tokenizer.pad(
            lowerCamelCase,
            padding="""longest""",
            max_length=lowerCamelCase,
            pad_to_multiple_of=lowerCamelCase,
            return_tensors="""pt""",
        )

    # Instantiate dataloaders.
    A_: int = DataLoader(
        tokenized_datasets["""train"""], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase, drop_last=lowerCamelCase
    )
    A_: str = DataLoader(
        tokenized_datasets["""validation"""],
        shuffle=lowerCamelCase,
        collate_fn=lowerCamelCase,
        batch_size=lowerCamelCase,
        drop_last=(accelerator.mixed_precision == """fp8"""),
    )

    return train_dataloader, eval_dataloader

def lowerCamelCase(lowerCamelCase: Any, lowerCamelCase: Dict):
    # Initialize accelerator
    A_: Tuple = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    A_: List[Any] = config["""lr"""]
    A_: List[Any] = int(config["""num_epochs"""])
    A_: int = int(config["""seed"""])
    A_: Dict = int(config["""batch_size"""])
    A_: Union[str, Any] = evaluate.load("""glue""", """mrpc""")

    # If the batch size is too big we use gradient accumulation
    A_: int = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        A_: Any = batch_size // MAX_GPU_BATCH_SIZE
        A_: Union[str, Any] = MAX_GPU_BATCH_SIZE

    set_seed(lowerCamelCase)
    A_, A_: List[str] = get_dataloaders(lowerCamelCase, lowerCamelCase)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    A_: Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""", return_dict=lowerCamelCase)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    A_: str = model.to(accelerator.device)

    # Instantiate optimizer
    A_: str = AdamW(params=model.parameters(), lr=lowerCamelCase)

    # Instantiate scheduler
    A_: Tuple = get_linear_schedule_with_warmup(
        optimizer=lowerCamelCase,
        num_warmup_steps=100,
        num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    A_, A_, A_, A_, A_: Union[str, Any] = accelerator.prepare(
        lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase
    )

    # Now we train the model
    for epoch in range(lowerCamelCase):
        model.train()
        for step, batch in enumerate(lowerCamelCase):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            A_: Optional[int] = model(**lowerCamelCase)
            A_: List[Any] = outputs.loss
            A_: Tuple = loss / gradient_accumulation_steps
            accelerator.backward(lowerCamelCase)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(lowerCamelCase):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                A_: Union[str, Any] = model(**lowerCamelCase)
            A_: Any = outputs.logits.argmax(dim=-1)
            A_, A_: Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""]))
            metric.add_batch(
                predictions=lowerCamelCase,
                references=lowerCamelCase,
            )

        A_: int = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:', lowerCamelCase)

def lowerCamelCase():
    A_: Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""")
    parser.add_argument(
        """--mixed_precision""",
        type=lowerCamelCase,
        default=lowerCamelCase,
        choices=["""no""", """fp16""", """bf16""", """fp8"""],
        help="""Whether to use mixed precision. Choose"""
        """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
        """and an Nvidia Ampere GPU.""",
    )
    parser.add_argument("""--cpu""", action="""store_true""", help="""If passed, will train on the CPU.""")
    A_: Dict = parser.parse_args()
    A_: Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
    training_function(lowerCamelCase, lowerCamelCase)

if __name__ == "__main__":
    main()
```
style_context_codestyle: 665
label: 1
code:
```python
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors

import transformers
from transformers import (
    AutoConfig,
    AutoModelForMultipleChoice,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process

__magic_name__ = logging.getLogger(__name__)

def lowerCamelCase(lowerCamelCase: List[Any], lowerCamelCase: int):
    return (preds == labels).mean()

@dataclass
class __lowerCAmelCase:
    '''simple docstring'''

    a_ = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""}
    )
    a_ = field(
        default=__SCREAMING_SNAKE_CASE, metadata={"""help""": """Pretrained config name or path if not the same as model_name"""}
    )
    a_ = field(
        default=__SCREAMING_SNAKE_CASE, metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""}
    )
    a_ = field(
        default=__SCREAMING_SNAKE_CASE,
        metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""},
    )

@dataclass
class __lowerCAmelCase:
    '''simple docstring'''

    a_ = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys())})
    a_ = field(metadata={"""help""": """Should contain the data files for the task."""})
    a_ = field(
        default=128,
        metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        },
    )
    a_ = field(
        default=__SCREAMING_SNAKE_CASE, metadata={"""help""": """Overwrite the cached training and evaluation sets"""}
    )

def lowerCamelCase():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    A_: Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    A_, A_, A_: Any = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
            """ --overwrite_output_dir to overcome."""
        )

    # Setup logging
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fpaa,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""", lowerCamelCase)

    # Set seed
    set_seed(training_args.seed)

    try:
        A_: List[str] = processors[data_args.task_name]()
        A_: Dict = processor.get_labels()
        A_: List[str] = len(lowerCamelCase)
    except KeyError:
        raise ValueError("""Task not found: %s""" % (data_args.task_name))

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    A_: str = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=lowerCamelCase,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    A_: Optional[int] = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    A_: str = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path),
        config=lowerCamelCase,
        cache_dir=model_args.cache_dir,
    )

    # Get datasets
    A_: Any = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=lowerCamelCase,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    A_: Tuple = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=lowerCamelCase,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )

    def compute_metrics(lowerCamelCase: EvalPrediction) -> Dict:
        A_: Optional[Any] = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(lowerCamelCase, p.label_ids)}

    # Data collator
    A_: Optional[Any] = DataCollatorWithPadding(lowerCamelCase, pad_to_multiple_of=8) if training_args.fpaa else None

    # Initialize our Trainer
    A_: int = Trainer(
        model=lowerCamelCase,
        args=lowerCamelCase,
        train_dataset=lowerCamelCase,
        eval_dataset=lowerCamelCase,
        compute_metrics=lowerCamelCase,
        data_collator=lowerCamelCase,
    )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    A_: str = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""")
        A_: int = trainer.evaluate()
        A_: List[str] = os.path.join(training_args.output_dir, """eval_results.txt""")
        if trainer.is_world_master():
            with open(lowerCamelCase, """w""") as writer:
                logger.info("""***** Eval results *****""")
                for key, value in result.items():
                    logger.info(""" %s = %s""", lowerCamelCase, lowerCamelCase)
                    writer.write("""%s = %s\n""" % (key, value))
                results.update(lowerCamelCase)

    return results

def lowerCamelCase(lowerCamelCase: int):
    # For xla_spawn (TPUs)
    main()

if __name__ == "__main__":
    main()
```
code_codestyle: 665
style_context:
```python
'''simple docstring'''
import functools

def lowerCamelCase(lowerCamelCase: list[int], lowerCamelCase: list[int]):
    # Validation
    if not isinstance(lowerCamelCase, lowerCamelCase) or not all(isinstance(lowerCamelCase, lowerCamelCase) for day in days):
        raise ValueError("""The parameter days should be a list of integers""")

    if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase, lowerCamelCase) for cost in costs):
        raise ValueError("""The parameter costs should be a list of three integers""")

    if len(lowerCamelCase) == 0:
        return 0

    if min(lowerCamelCase) <= 0:
        raise ValueError("""All days elements should be greater than 0""")

    if max(lowerCamelCase) >= 366:
        raise ValueError("""All days elements should be less than 366""")

    A_: Tuple = set(lowerCamelCase)

    @functools.cache
    def dynamic_programming(lowerCamelCase: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
style_context_codestyle: 665
label: 1
code:
```python
'''simple docstring'''
from __future__ import annotations

def lowerCamelCase(lowerCamelCase: list[int]):  # This function is recursive
    A_: Union[str, Any] = len(lowerCamelCase)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    A_: Optional[int] = array[0]
    A_: Union[str, Any] = False
    A_: str = 1
    A_: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            A_: Dict = True
            A_: Dict = [element for element in array[i:] if element >= array[i]]
            A_: Dict = longest_subsequence(lowerCamelCase)
            if len(lowerCamelCase) > len(lowerCamelCase):
                A_: Union[str, Any] = temp_array
        else:
            i += 1

    A_: Tuple = [element for element in array[1:] if element >= pivot]
    A_: Tuple = [pivot, *longest_subsequence(lowerCamelCase)]
    if len(lowerCamelCase) > len(lowerCamelCase):
        return temp_array
    else:
        return longest_subseq

if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
code_codestyle: 665
style_context:
```python
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import floataa
from numpy.typing import NDArray

def lowerCamelCase(
    lowerCamelCase: NDArray[floataa],
    lowerCamelCase: NDArray[floataa],
    lowerCamelCase: list[int],
    lowerCamelCase: int,
):
    A_, A_: int = coefficient_matrix.shape
    A_, A_: Union[str, Any] = constant_matrix.shape

    if rowsa != colsa:
        A_: Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
        raise ValueError(lowerCamelCase)

    if colsa != 1:
        A_: Tuple = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
        raise ValueError(lowerCamelCase)

    if rowsa != rowsa:
        A_: Dict = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
        )
        raise ValueError(lowerCamelCase)

    if len(lowerCamelCase) != rowsa:
        A_: Union[str, Any] = (
            """Number of initial values must be equal to number of rows in coefficient """
            F'matrix but received {len(lowerCamelCase)} and {rowsa}'
        )
        raise ValueError(lowerCamelCase)

    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""")

    A_: NDArray[floataa] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    A_, A_: int = table.shape

    strictly_diagonally_dominant(lowerCamelCase)

    # Iterates the whole matrix for given number of times
    for _ in range(lowerCamelCase):
        A_: List[Any] = []
        for row in range(lowerCamelCase):
            A_: int = 0
            for col in range(lowerCamelCase):
                if col == row:
                    A_: List[str] = table[row][col]
                elif col == cols - 1:
                    A_: str = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            A_: Union[str, Any] = (temp + val) / denom
            new_val.append(lowerCamelCase)
        A_: Tuple = new_val

    return [float(lowerCamelCase) for i in new_val]

def lowerCamelCase(lowerCamelCase: NDArray[floataa]):
    A_, A_: Dict = table.shape
    A_: Union[str, Any] = True

    for i in range(0, lowerCamelCase):
        A_: str = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")

    return is_diagonally_dominant

# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
style_context_codestyle: 665
label: 1
code:
```python
'''simple docstring'''
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

__magic_name__ = logging.get_logger(__name__)

@add_end_docstrings(__SCREAMING_SNAKE_CASE)
class __lowerCAmelCase(__SCREAMING_SNAKE_CASE):
    '''simple docstring'''

    def __init__(self: Optional[int], *_a: str, **_a: Optional[int]):
        '''simple docstring'''
        super().__init__(*_a, **_a)
        requires_backends(self, """vision""")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _a(self: Any, _a: Union[str, Any] = None, _a: Any = None, _a: Optional[int] = None):
        '''simple docstring'''
        A_: int = {}
        A_: Optional[Any] = {}
        if prompt is not None:
            A_: Any = prompt
        if generate_kwargs is not None:
            A_: Any = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                A_: Any = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    """'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
                    """ please use only one"""
                )
            A_: Tuple = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self: Optional[Any], _a: Union[str, List[str], "Image.Image", List["Image.Image"]], **_a: Union[str, Any]):
        '''simple docstring'''
        return super().__call__(_a, **_a)

    def _a(self: Dict, _a: Tuple, _a: List[Any] = None):
        '''simple docstring'''
        A_: Optional[int] = load_image(_a)
        if prompt is not None:
            if not isinstance(_a, _a):
                raise ValueError(
                    f'Received an invalid text input, got - {type(_a)} - but expected a single string. '
                    """Note also that one single text can be provided for conditional image to text generation."""
                )

            A_: Optional[Any] = self.model.config.model_type

            if model_type == "git":
                A_: Optional[Any] = self.image_processor(images=_a, return_tensors=self.framework)
                A_: Any = self.tokenizer(text=_a, add_special_tokens=_a).input_ids
                A_: Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
                A_: List[str] = torch.tensor(_a).unsqueeze(0)
                model_inputs.update({"""input_ids""": input_ids})
            elif model_type == "pix2struct":
                A_: Optional[Any] = self.image_processor(images=_a, header_text=_a, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                A_: Dict = self.image_processor(images=_a, return_tensors=self.framework)
                A_: Any = self.tokenizer(_a, return_tensors=self.framework)
                model_inputs.update(_a)
            else:
                raise ValueError(f'Model type {model_type} does not support conditional text generation')
        else:
            A_: List[str] = self.image_processor(images=_a, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            A_: List[Any] = None

        return model_inputs

    def _a(self: str, _a: List[str], _a: List[Any] = None):
        '''simple docstring'''
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["""input_ids"""], _a)
            and all(x is None for x in model_inputs["""input_ids"""])
        ):
            A_: Tuple = None

        if generate_kwargs is None:
            A_: Optional[int] = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        A_: Dict = model_inputs.pop(self.model.main_input_name)
        A_: Dict = self.model.generate(_a, **_a, **_a)
        return model_outputs

    def _a(self: List[str], _a: List[Any]):
        '''simple docstring'''
        A_: Optional[int] = []
        for output_ids in model_outputs:
            A_: Optional[Any] = {
                """generated_text""": self.tokenizer.decode(
                    _a,
                    skip_special_tokens=_a,
                )
            }
            records.append(_a)
        return records
```
code_codestyle: 665
style_context:
```python
'''simple docstring'''

def lowerCamelCase(lowerCamelCase: str, lowerCamelCase: str):
    A_: Any = len(lowerCamelCase)
    A_: Optional[Any] = len(lowerCamelCase)
    A_: Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    A_: Union[str, Any] = True
    for i in range(lowerCamelCase):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    A_: Optional[int] = True
                if a[i].islower():
                    A_: List[Any] = True
    return dp[n][m]

if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
style_context_codestyle: 665
label: 1
code:
```python
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable

__magic_name__ = list[list[float | int]]

def lowerCamelCase(lowerCamelCase: Matrix, lowerCamelCase: Matrix):
    A_: int = len(lowerCamelCase)
    A_: Matrix = [[0 for _ in range(size + 1)] for _ in range(lowerCamelCase)]
    A_: int
    A_: int
    A_: int
    A_: int
    A_: int
    A_: float

    for row in range(lowerCamelCase):
        for col in range(lowerCamelCase):
            A_: List[str] = matrix[row][col]
        A_: Union[str, Any] = vector[row][0]

    A_: Union[str, Any] = 0
    A_: Optional[Any] = 0
    while row < size and col < size:
        # pivoting
        A_: Any = max((abs(augmented[rowa][col]), rowa) for rowa in range(lowerCamelCase, lowerCamelCase))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            A_, A_: Dict = augmented[pivot_row], augmented[row]

        for rowa in range(row + 1, lowerCamelCase):
            A_: List[Any] = augmented[rowa][col] / augmented[row][col]
            A_: Any = 0
            for cola in range(col + 1, size + 1):
                augmented[rowa][cola] -= augmented[row][cola] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, lowerCamelCase):
        for row in range(lowerCamelCase):
            A_: Optional[int] = augmented[row][col] / augmented[col][col]
            for cola in range(lowerCamelCase, size + 1):
                augmented[row][cola] -= augmented[col][cola] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(lowerCamelCase)
    ]

def lowerCamelCase(lowerCamelCase: list[int]):
    A_: int = len(lowerCamelCase)
    A_: Matrix = [[0 for _ in range(lowerCamelCase)] for _ in range(lowerCamelCase)]
    A_: Matrix = [[0] for _ in range(lowerCamelCase)]
    A_: Matrix
    A_: int
    A_: int
    A_: int

    for x_val, y_val in enumerate(lowerCamelCase):
        for col in range(lowerCamelCase):
            A_: Optional[Any] = (x_val + 1) ** (size - col - 1)
        A_: int = y_val

    A_: List[Any] = solve(lowerCamelCase, lowerCamelCase)

    def interpolated_func(lowerCamelCase: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1)) for x_val in range(lowerCamelCase)
        )

    return interpolated_func

def lowerCamelCase(lowerCamelCase: int):
    return (
        1 - variable + variable**2 - variable**3 + variable**4 - variable**5
        + variable**6 - variable**7 + variable**8 - variable**9 + variable**10
    )

def lowerCamelCase(lowerCamelCase: Callable[[int], int] = question_function, lowerCamelCase: int = 10):
    A_: list[int] = [func(lowerCamelCase) for x_val in range(1, order + 1)]
    A_: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    A_: int = 0
    A_: Callable[[int], int]
    A_: int

    for poly in polynomials:
        A_: Union[str, Any] = 1
        while func(lowerCamelCase) == poly(lowerCamelCase):
            x_val += 1
        ret += poly(lowerCamelCase)

    return ret

if __name__ == "__main__":
    print(f"""{solution() = }""")
```
code_codestyle: 665
style_context:
```python
'''simple docstring'''
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass

@dataclass
class __lowerCAmelCase:
    '''simple docstring'''

    a_ = 42
    a_ = 42

class __lowerCAmelCase:
    '''simple docstring'''

    def __init__(self: Union[str, Any], _a: int):
        '''simple docstring'''
        A_: list[list[Edge]] = [[] for _ in range(_a)]
        A_: List[Any] = size

    def __getitem__(self: int, _a: int):
        '''simple docstring'''
        return iter(self._graph[vertex])

    @property
    def _a(self: str):
        '''simple docstring'''
        return self._size

    def _a(self: str, _a: int, _a: int, _a: int):
        '''simple docstring'''
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")

        self._graph[from_vertex].append(Edge(_a, _a))

    def _a(self: Dict, _a: int, _a: int):
        '''simple docstring'''
        A_: Tuple = deque([start_vertex])
        A_: list[int | None] = [None] * self.size
        A_: Union[str, Any] = 0

        while queue:
            A_: List[Any] = queue.popleft()
            A_: Tuple = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                A_: Union[str, Any] = current_distance + edge.weight
                A_: Optional[Any] = distances[edge.destination_vertex]
                if (
                    isinstance(_a, _a)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                A_: Tuple = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")

        return distances[finish_vertex]

if __name__ == "__main__":
    import doctest

    doctest.testmod()
```
style_context_codestyle: 665
label: 1
code:
```python
'''simple docstring'''
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand

def lowerCamelCase(lowerCamelCase: Union[str, Any]):
    return EnvironmentCommand()

def lowerCamelCase(lowerCamelCase: Any):
    return EnvironmentCommand(args.accelerate_config_file)

class __lowerCAmelCase(__SCREAMING_SNAKE_CASE):
    '''simple docstring'''

    @staticmethod
    def _a(_a: ArgumentParser):
        '''simple docstring'''
        A_: str = parser.add_parser("""env""")
        download_parser.set_defaults(func=_a)
        download_parser.add_argument(
            """--accelerate-config_file""",
            default=_a,
            help="""The accelerate config file to use for the default values in the launching script.""",
        )
        download_parser.set_defaults(func=_a)

    def __init__(self: Optional[int], _a: Optional[Any], *_a: List[Any]):
        '''simple docstring'''
        A_: Optional[int] = accelerate_config_file

    def _a(self: Dict):
        '''simple docstring'''
        A_: Optional[int] = """not installed"""
        if is_safetensors_available():
            import safetensors

            A_: Optional[Any] = safetensors.__version__
        elif importlib.util.find_spec("""safetensors""") is not None:
            import safetensors

            A_: Optional[Any] = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'

        A_: List[str] = """not installed"""
        A_: Union[str, Any] = """not found"""
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            A_: Dict = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(_a):
                A_: List[str] = load_config_from_file(self._accelerate_config_file).to_dict()

            A_: List[str] = (
                """\n""".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
                if isinstance(_a, _a)
                else f'\t{accelerate_config}'
            )

        A_: Tuple = """not installed"""
        A_: Dict = """NA"""
        if is_torch_available():
            import torch

            A_: Union[str, Any] = torch.__version__
            A_: List[Any] = torch.cuda.is_available()

        A_: Dict = """not installed"""
        A_: Optional[int] = """NA"""
        if is_tf_available():
            import tensorflow as tf

            A_: str = tf.__version__
            try:
                # deprecated in v2.1
                A_: Union[str, Any] = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                A_: Union[str, Any] = bool(tf.config.list_physical_devices("""GPU"""))

        A_: Dict = """not installed"""
        A_: List[str] = """not installed"""
        A_: Dict = """not installed"""
        A_: str = """NA"""
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            A_: Dict = flax.__version__
            A_: Tuple = jax.__version__
            A_: List[str] = jaxlib.__version__
            A_: Any = jax.lib.xla_bridge.get_backend().platform

        A_: List[str] = {
            """`transformers` version""": version,
            """Platform""": platform.platform(),
            """Python version""": platform.python_version(),
            """Huggingface_hub version""": huggingface_hub.__version__,
            """Safetensors version""": f'{safetensors_version}',
            """Accelerate version""": f'{accelerate_version}',
            """Accelerate config""": f'{accelerate_config_str}',
            """PyTorch version (GPU?)""": f'{pt_version} ({pt_cuda_available})',
            """Tensorflow version (GPU?)""": f'{tf_version} ({tf_cuda_available})',
            """Flax version (CPU?/GPU?/TPU?)""": f'{flax_version} ({jax_backend})',
            """Jax version""": f'{jax_version}',
            """JaxLib version""": f'{jaxlib_version}',
            """Using GPU in script?""": """<fill in>""",
            """Using distributed or parallel set-up in script?""": """<fill in>""",
        }

        print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""")
        print(self.format_dict(_a))

        return info

    @staticmethod
    def _a(_a: Union[str, Any]):
        '''simple docstring'''
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()]) + "\n"
```
665
'''simple docstring'''
def solution(max_perimeter: int = 10**9) -> int:
    """Sum of the perimeters, up to ``max_perimeter``, of almost-equilateral
    triangles (sides a, a, a +/- 1) with integral area, cf. Project Euler 94.
    Consecutive solutions follow a linear recurrence, so no search is needed."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
665
1
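The recurrence above is opaque on its own; a brute-force cross-check over small perimeters makes the claim concrete. This sketch is an addition, not part of the record, and uses Heron's formula; it is only feasible for small limits, not for 10**9:

from math import isqrt

def brute_force(max_perimeter: int) -> int:
    # Sum perimeters of triangles (a, a, a +/- 1) with integral area.
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for b in (a - 1, a + 1):
            p = 2 * a + b
            if b <= 0 or p > max_perimeter:
                continue
            # Heron's formula for sides (a, a, b): 16 * area^2 = p * (p - 2a)^2 * (p - 2b)
            sq = p * (p - 2 * a) * (p - 2 * a) * (p - 2 * b)
            if sq <= 0:
                continue
            root = isqrt(sq)
            if root * root == sq and root % 4 == 0:  # area = root / 4 must be integral
                total += p
    return total

assert brute_force(100) == solution(100) == 66  # (5, 5, 6) and (17, 17, 16)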
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """gpt_bigcode""" a_ = ["""past_key_values"""] a_ = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,): '''simple docstring''' A_ : Optional[Any] = vocab_size A_ : int = n_positions A_ : Union[str, Any] = n_embd A_ : int = n_layer A_ : Optional[int] = n_head A_ : Union[str, Any] = n_inner A_ : List[Any] = activation_function A_ : Dict = resid_pdrop A_ : int = embd_pdrop A_ : Optional[int] = attn_pdrop A_ : Union[str, Any] = layer_norm_epsilon A_ : int = initializer_range A_ : Union[str, Any] = scale_attn_weights A_ : List[str] = use_cache A_ : Tuple = attention_softmax_in_fpaa A_ : List[str] = scale_attention_softmax_in_fpaa A_ : Union[str, Any] = multi_query A_ : Any = bos_token_id A_ : Optional[int] = eos_token_id super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
665
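A short usage sketch for the config record above. It assumes a transformers release that ships GPTBigCode (4.28 or later); the tiny hyperparameter values are arbitrary:

from transformers import GPTBigCodeConfig, GPTBigCodeForCausalLM

config = GPTBigCodeConfig(
    vocab_size=1024,
    n_positions=128,
    n_embd=64,
    n_layer=2,
    n_head=4,
    multi_query=True,  # single shared key/value head, GPTBigCode's main variation on GPT-2
)
model = GPTBigCodeForCausalLM(config)
print(model.config.hidden_size)  # 64, resolved through the attribute_map alias to n_embd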
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def lowerCamelCase ( ): A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase) A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""") # Register commands get_config_parser(subparsers=lowerCamelCase) env_command_parser(subparsers=lowerCamelCase) launch_command_parser(subparsers=lowerCamelCase) tpu_command_parser(subparsers=lowerCamelCase) test_command_parser(subparsers=lowerCamelCase) # Let's go A_ : Dict = parser.parse_args() if not hasattr(lowerCamelCase , """func"""): parser.print_help() exit(1) # Run args.func(lowerCamelCase) if __name__ == "__main__": main()
665
1
'''simple docstring''' import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available __magic_name__ = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : '''simple docstring''' a_ = 42 a_ = 42 a_ = 42 @dataclass class __lowerCAmelCase : '''simple docstring''' a_ = 42 a_ = 42 a_ = None a_ = None class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """train""" a_ = """dev""" a_ = """test""" class __lowerCAmelCase : '''simple docstring''' @staticmethod def _a ( _a : Dict ,_a : Union[Split, str] ): '''simple docstring''' raise NotImplementedError @staticmethod def _a ( _a : str ): '''simple docstring''' raise NotImplementedError @staticmethod def _a ( _a : List[InputExample] ,_a : List[str] ,_a : int ,_a : PreTrainedTokenizer ,_a : str=False ,_a : Union[str, Any]="[CLS]" ,_a : int=1 ,_a : Optional[int]="[SEP]" ,_a : List[Any]=False ,_a : List[Any]=False ,_a : Optional[Any]=0 ,_a : Dict=0 ,_a : int=-100 ,_a : Union[str, Any]=0 ,_a : Tuple=True ,): '''simple docstring''' A_ : Any = {label: i for i, label in enumerate(_a )} A_ : Any = [] for ex_index, example in enumerate(_a ): if ex_index % 10000 == 0: logger.info("""Writing example %d of %d""" ,_a ,len(_a ) ) A_ : List[Any] = [] A_ : List[str] = [] for word, label in zip(example.words ,example.labels ): A_ : Any = tokenizer.tokenize(_a ) # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space. if len(_a ) > 0: tokens.extend(_a ) # Use the real label id for the first token of the word, and padding ids for the remaining tokens label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_a ) - 1) ) # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa. A_ : str = tokenizer.num_special_tokens_to_add() if len(_a ) > max_seq_length - special_tokens_count: A_ : str = tokens[: (max_seq_length - special_tokens_count)] A_ : Optional[int] = label_ids[: (max_seq_length - special_tokens_count)] # The convention in BERT is: # (a) For sequence pairs: # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 # (b) For single sequences: # tokens: [CLS] the dog is hairy . [SEP] # type_ids: 0 0 0 0 0 0 0 # # Where "type_ids" are used to indicate whether this is the first # sequence or the second sequence. The embedding vectors for `type=0` and # `type=1` were learned during pre-training and are added to the wordpiece # embedding vector (and position vector). This is not *strictly* necessary # since the [SEP] token unambiguously separates the sequences, but it makes # it easier for the model to learn the concept of sequences. # # For classification tasks, the first vector (corresponding to [CLS]) is # used as the "sentence vector". Note that this only makes sense because # the entire model is fine-tuned. 
tokens += [sep_token] label_ids += [pad_token_label_id] if sep_token_extra: # roberta uses an extra separator b/w pairs of sentences tokens += [sep_token] label_ids += [pad_token_label_id] A_ : int = [sequence_a_segment_id] * len(_a ) if cls_token_at_end: tokens += [cls_token] label_ids += [pad_token_label_id] segment_ids += [cls_token_segment_id] else: A_ : Optional[int] = [cls_token] + tokens A_ : str = [pad_token_label_id] + label_ids A_ : Any = [cls_token_segment_id] + segment_ids A_ : str = tokenizer.convert_tokens_to_ids(_a ) # The mask has 1 for real tokens and 0 for padding tokens. Only real # tokens are attended to. A_ : Tuple = [1 if mask_padding_with_zero else 0] * len(_a ) # Zero-pad up to the sequence length. A_ : str = max_seq_length - len(_a ) if pad_on_left: A_ : Union[str, Any] = ([pad_token] * padding_length) + input_ids A_ : Optional[Any] = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask A_ : Optional[int] = ([pad_token_segment_id] * padding_length) + segment_ids A_ : Optional[int] = ([pad_token_label_id] * padding_length) + label_ids else: input_ids += [pad_token] * padding_length input_mask += [0 if mask_padding_with_zero else 1] * padding_length segment_ids += [pad_token_segment_id] * padding_length label_ids += [pad_token_label_id] * padding_length assert len(_a ) == max_seq_length assert len(_a ) == max_seq_length assert len(_a ) == max_seq_length assert len(_a ) == max_seq_length if ex_index < 5: logger.info("""*** Example ***""" ) logger.info("""guid: %s""" ,example.guid ) logger.info("""tokens: %s""" ,""" """.join([str(_a ) for x in tokens] ) ) logger.info("""input_ids: %s""" ,""" """.join([str(_a ) for x in input_ids] ) ) logger.info("""input_mask: %s""" ,""" """.join([str(_a ) for x in input_mask] ) ) logger.info("""segment_ids: %s""" ,""" """.join([str(_a ) for x in segment_ids] ) ) logger.info("""label_ids: %s""" ,""" """.join([str(_a ) for x in label_ids] ) ) if "token_type_ids" not in tokenizer.model_input_names: A_ : Dict = None features.append( InputFeatures( input_ids=_a ,attention_mask=_a ,token_type_ids=_a ,label_ids=_a ) ) return features if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = 42 a_ = nn.CrossEntropyLoss().ignore_index def __init__( self : Optional[int] ,_a : TokenClassificationTask ,_a : str ,_a : PreTrainedTokenizer ,_a : List[str] ,_a : str ,_a : Optional[int] = None ,_a : Tuple=False ,_a : Split = Split.train ,): '''simple docstring''' A_ : str = os.path.join( _a ,"""cached_{}_{}_{}""".format(mode.value ,tokenizer.__class__.__name__ ,str(_a ) ) ,) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
A_ : Union[str, Any] = cached_features_file + """.lock""" with FileLock(_a ): if os.path.exists(_a ) and not overwrite_cache: logger.info(f'Loading features from cached file {cached_features_file}' ) A_ : Any = torch.load(_a ) else: logger.info(f'Creating features from dataset file at {data_dir}' ) A_ : Optional[int] = token_classification_task.read_examples_from_file(_a ,_a ) # TODO clean up all this to leverage built-in features of tokenizers A_ : int = token_classification_task.convert_examples_to_features( _a ,_a ,_a ,_a ,cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=_a ,pad_on_left=bool(tokenizer.padding_side == """left""" ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) logger.info(f'Saving features into cached file {cached_features_file}' ) torch.save(self.features ,_a ) def __len__( self : int ): '''simple docstring''' return len(self.features ) def __getitem__( self : Dict ,_a : Optional[Any] ): '''simple docstring''' return self.features[i] if is_tf_available(): import tensorflow as tf class __lowerCAmelCase : '''simple docstring''' a_ = 42 a_ = -100 def __init__( self : int ,_a : TokenClassificationTask ,_a : str ,_a : PreTrainedTokenizer ,_a : List[str] ,_a : str ,_a : Optional[int] = None ,_a : List[str]=False ,_a : Split = Split.train ,): '''simple docstring''' A_ : Dict = token_classification_task.read_examples_from_file(_a ,_a ) # TODO clean up all this to leverage built-in features of tokenizers A_ : List[str] = token_classification_task.convert_examples_to_features( _a ,_a ,_a ,_a ,cls_token_at_end=bool(model_type in ["""xlnet"""] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=_a ,pad_on_left=bool(tokenizer.padding_side == """left""" ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,) def gen(): for ex in self.features: if ex.token_type_ids is None: yield ( {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label_ids, ) else: yield ( { "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label_ids, ) if "token_type_ids" not in tokenizer.model_input_names: A_ : Any = tf.data.Dataset.from_generator( _a ,({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) ,( {"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )}, tf.TensorShape([None] ), ) ,) else: A_ : Optional[int] = tf.data.Dataset.from_generator( _a ,({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) ,( { """input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] ), """token_type_ids""": tf.TensorShape([None] ), }, tf.TensorShape([None] ), ) ,) def _a ( self : Dict ): '''simple docstring''' A_ : Union[str, Any] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) ) return self.dataset def __len__( self : Optional[int] ): '''simple docstring''' return len(self.features ) def __getitem__( self : Optional[Any] ,_a : Optional[Any] ): '''simple docstring''' return self.features[i]
665
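The heart of the token-classification record above is the first-subtoken labeling trick: each word is tokenized separately, the word's label goes on its first sub-token, and the remaining sub-tokens get the ignore index so the loss skips them. A condensed sketch; the helper name and the example inputs are illustrative, not from the record:

from transformers import AutoTokenizer

def align_labels(words, labels, label_map, tokenizer, pad_token_label_id=-100):
    tokens, label_ids = [], []
    for word, label in zip(words, labels):
        word_tokens = tokenizer.tokenize(word)
        if not word_tokens:  # some tokenizers return [] for bare whitespace
            continue
        tokens.extend(word_tokens)
        # real label on the first sub-token, ignore index on the remainder
        label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
    return tokens, label_ids

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
print(align_labels(["Hugging", "Face"], ["B-ORG", "I-ORG"], {"B-ORG": 3, "I-ORG": 4}, tokenizer))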
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __magic_name__ = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
1
'''simple docstring''' import json import os import torch from diffusers import UNetaDModel os.makedirs('hub/hopper-medium-v2/unet/hor32', exist_ok=True) os.makedirs('hub/hopper-medium-v2/unet/hor128', exist_ok=True) os.makedirs('hub/hopper-medium-v2/value_function', exist_ok=True) def lowerCamelCase ( lowerCamelCase : Union[str, Any]): if hor == 128: A_ : List[Any] = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") A_ : List[Any] = (32, 128, 256) A_ : Tuple = ("""UpResnetBlock1D""", """UpResnetBlock1D""") elif hor == 32: A_ : str = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""") A_ : Optional[Any] = (32, 64, 128, 256) A_ : Optional[int] = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""") A_ : Optional[int] = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch') A_ : Optional[Any] = model.state_dict() A_ : Optional[int] = { """down_block_types""": down_block_types, """block_out_channels""": block_out_channels, """up_block_types""": up_block_types, """layers_per_block""": 1, """use_timestep_embedding""": True, """out_block_type""": """OutConv1DBlock""", """norm_num_groups""": 8, """downsample_each_block""": False, """in_channels""": 14, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """flip_sin_to_cos""": False, """freq_shift""": 1, """sample_size""": 6_5536, """mid_block_type""": """MidResTemporalBlock1D""", """act_fn""": """mish""", } A_ : List[str] = UNetaDModel(**lowerCamelCase) print(F'length of state dict: {len(state_dict.keys())}') print(F'length of value function dict: {len(hf_value_function.state_dict().keys())}') A_ : Dict = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys())) for k, v in mapping.items(): A_ : Any = state_dict.pop(lowerCamelCase) hf_value_function.load_state_dict(lowerCamelCase) torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin') with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase) def lowerCamelCase ( ): A_ : Tuple = { """in_channels""": 14, """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""), """up_block_types""": (), """out_block_type""": """ValueFunction""", """mid_block_type""": """ValueFunctionMidBlock1D""", """block_out_channels""": (32, 64, 128, 256), """layers_per_block""": 1, """downsample_each_block""": True, """sample_size""": 6_5536, """out_channels""": 14, """extra_in_channels""": 0, """time_embedding_type""": """positional""", """use_timestep_embedding""": True, """flip_sin_to_cos""": False, """freq_shift""": 1, """norm_num_groups""": 8, """act_fn""": """mish""", } A_ : Tuple = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""") A_ : List[str] = model A_ : Dict = UNetaDModel(**lowerCamelCase) print(F'length of state dict: {len(state_dict.keys())}') print(F'length of value function dict: {len(hf_value_function.state_dict().keys())}') A_ : List[str] = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys())) for k, v in mapping.items(): A_ : int = state_dict.pop(lowerCamelCase) hf_value_function.load_state_dict(lowerCamelCase) torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""") with 
open("""hub/hopper-medium-v2/value_function/config.json""" , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase) if __name__ == "__main__": unet(32) # unet(128) value_function()
665
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['YolosFeatureExtractor'] __magic_name__ = ['YolosImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST', 'YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
1
'''simple docstring'''
import qiskit


def half_adder(bita: int, bitb: int):
    """Simulate a quantum half adder: XOR (sum) lands on qubit 2, AND (carry) on qubit 3."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bita == 1:
        qc_ha.x(0)
    if bitb == 1:
        qc_ha.x(1)
    qc_ha.barrier()
    # use cnots to write XOR of the inputs on qubit 2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)
    # use ccx / toffoli gate to write AND of the inputs on qubit 3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()
    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)
    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
665
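An exhaustive check of the half adder above against the classical truth table. The count-string ordering (carry bit first, then sum) follows qiskit's little-endian classical register convention; like the record itself, this relies on the legacy qiskit.Aer / qiskit.execute API, which was removed in Qiskit 1.0:

for bit_a in (0, 1):
    for bit_b in (0, 1):
        counts = half_adder(bit_a, bit_b)
        measured = max(counts, key=counts.get)  # dominant outcome, e.g. '10'
        assert measured == f"{bit_a & bit_b}{bit_a ^ bit_b}"  # carry, then sum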
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __magic_name__ = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
1
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler __magic_name__ = 16 __magic_name__ = 32 def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16 , lowerCamelCase : str = "bert-base-cased"): A_ : int = AutoTokenizer.from_pretrained(lowerCamelCase) A_ : Tuple = load_dataset("""glue""" , """mrpc""") def tokenize_function(lowerCamelCase : int): # max_length=None => use the model max length (it's actually the default) A_ : Any = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset A_ : Optional[Any] = datasets.map( lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCamelCase) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : Optional[Any] = tokenized_datasets.rename_column("""label""" , """labels""") def collate_fn(lowerCamelCase : Tuple): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""") return tokenizer.pad(lowerCamelCase , padding="""longest""" , return_tensors="""pt""") # Instantiate dataloaders. 
A_ : List[Any] = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase) A_ : List[str] = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase) return train_dataloader, eval_dataloader def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int]): # Initialize accelerator A_ : Union[str, Any] = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : Optional[int] = config["""lr"""] A_ : Dict = int(config["""num_epochs"""]) A_ : Dict = int(config["""seed"""]) A_ : List[str] = int(config["""batch_size"""]) A_ : Optional[int] = args.model_name_or_path set_seed(lowerCamelCase) A_ , A_ : str = get_dataloaders(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : List[str] = AutoModelForSequenceClassification.from_pretrained(lowerCamelCase , return_dict=lowerCamelCase) # Instantiate optimizer A_ : Tuple = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) A_ : str = optimizer_cls(params=model.parameters() , lr=lowerCamelCase) if accelerator.state.deepspeed_plugin is not None: A_ : Optional[int] = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: A_ : Tuple = 1 A_ : Any = (len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): A_ : Optional[int] = get_linear_schedule_with_warmup( optimizer=lowerCamelCase , num_warmup_steps=0 , num_training_steps=lowerCamelCase , ) else: A_ : Dict = DummyScheduler(lowerCamelCase , total_num_steps=lowerCamelCase , warmup_num_steps=0) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ , A_ , A_ , A_ , A_ : Tuple = accelerator.prepare( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) # We need to keep track of how many total steps we have iterated over A_ : List[str] = 0 # We also need to keep track of the stating epoch so files are named properly A_ : str = 0 # Now we train the model A_ : Dict = evaluate.load("""glue""" , """mrpc""") A_ : str = 0 A_ : str = {} for epoch in range(lowerCamelCase , lowerCamelCase): model.train() for step, batch in enumerate(lowerCamelCase): A_ : Union[str, Any] = model(**lowerCamelCase) A_ : str = outputs.loss A_ : List[str] = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 model.eval() A_ : Optional[int] = 0 for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device) with torch.no_grad(): A_ : List[str] = model(**lowerCamelCase) A_ : Dict = outputs.logits.argmax(dim=-1) # It is slightly faster to call this once, than multiple times A_ , A_ : str = accelerator.gather( (predictions, batch["""labels"""])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(lowerCamelCase) - 1: A_ : Dict = predictions[: len(eval_dataloader.dataset) - samples_seen] A_ : Tuple = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=lowerCamelCase , references=lowerCamelCase , ) A_ : Tuple = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , lowerCamelCase) A_ : Tuple = eval_metric["""accuracy"""] if best_performance < eval_metric["accuracy"]: A_ : Optional[Any] = eval_metric["""accuracy"""] if args.performance_lower_bound is not None: assert ( args.performance_lower_bound <= best_performance ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}' accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , """all_results.json""") , """w""") as f: json.dump(lowerCamelCase , lowerCamelCase) def lowerCamelCase ( ): A_ : Tuple = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""") parser.add_argument( """--model_name_or_path""" , type=lowerCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCamelCase , ) parser.add_argument( """--output_dir""" , type=lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , ) parser.add_argument( """--performance_lower_bound""" , type=lowerCamelCase , default=lowerCamelCase , help="""Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.""" , ) parser.add_argument( """--num_epochs""" , type=lowerCamelCase , default=3 , help="""Number of train epochs.""" , ) A_ : Union[str, Any] = parser.parse_args() A_ : str = {"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase , lowerCamelCase) if __name__ == "__main__": main()
665
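Stripped of the GLUE/MRPC specifics and the DeepSpeed dummy classes, the training script above reduces to a small Accelerator pattern. A minimal self-contained sketch with synthetic data; the model and data here are placeholders:

import torch
from accelerate import Accelerator

accelerator = Accelerator()  # picks up DDP/DeepSpeed configuration from the environment
model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
loader = torch.utils.data.DataLoader(
    [(torch.randn(10), torch.tensor(0)) for _ in range(32)], batch_size=8
)
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
for x, y in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(x), y)
    accelerator.backward(loss)  # replaces loss.backward(); handles scaling and DeepSpeed
    optimizer.step()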
'''simple docstring'''
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: repeatedly output vertices whose remaining indegree is zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
665
1
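Since Python 3.9 the standard library offers the same ordering via graphlib. A sketch using the record's graph; graphlib expects predecessor sets, so the adjacency list is inverted first:

from graphlib import TopologicalSorter

graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
predecessors = {v: set() for v in graph}
for u, successors in graph.items():
    for v in successors:
        predecessors[v].add(u)
print(list(TopologicalSorter(predecessors).static_order()))  # e.g. [0, 1, 2, 3, 4, 5]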
'''simple docstring'''
import unittest

from knapsack import greedy_knapsack as kp


class TestClass(unittest.TestCase):
    def test_sorted(self):
        """calc_profit returns the maximum profit (210) for this input."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        """Expect a ValueError when max_weight is negative."""
        # assertRaisesRegex without a callable only builds a context manager;
        # the offending kp.calc_profit call still needs to be supplied here.
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        """Expect a ValueError when a weight is negative."""
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        """Expect a ValueError when a profit is negative."""
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        """Expect a ValueError when max_weight is zero."""
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        """Expect a ValueError when profit and weight differ in length."""
        self.assertRaisesRegex(
            ValueError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
665
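A sketch of a calc_profit that satisfies the assertions above: greedy fractional knapsack by profit/weight ratio. This is a reconstruction of the behavior the tests imply, not the actual greedy_knapsack module; the error strings are copied verbatim from the tests, including the upstream "must greater than" wording:

def calc_profit(profit: list[int], weight: list[int], max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # take whole items in decreasing profit/weight ratio, then a fraction of the next
    order = sorted(range(len(profit)), key=lambda i: profit[i] / weight[i], reverse=True)
    total, remaining = 0.0, max_weight
    for i in order:
        if weight[i] <= remaining:
            total += profit[i]
            remaining -= weight[i]
        else:
            total += profit[i] * remaining / weight[i]
            break
    return total

print(calc_profit([10, 20, 30, 40, 50, 60], [2, 4, 6, 8, 10, 12], 100))  # 210.0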
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,): '''simple docstring''' A_ : List[str] = parent A_ : Any = batch_size A_ : Tuple = seq_length A_ : List[str] = is_training A_ : Tuple = use_input_mask A_ : Dict = use_token_type_ids A_ : List[Any] = use_labels A_ : Union[str, Any] = vocab_size A_ : Any = hidden_size A_ : str = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : str = intermediate_size A_ : Tuple = hidden_act A_ : Any = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : List[str] = max_position_embeddings A_ : int = type_vocab_size A_ : Union[str, Any] = type_sequence_label_size A_ : Any = initializer_range A_ : List[Any] = num_labels A_ : Optional[Any] = num_choices A_ : List[Any] = scope def _a ( self : Optional[int] ): '''simple docstring''' A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : int = None if self.use_input_mask: A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Dict = None if self.use_token_type_ids: A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : str = None A_ : Any = None A_ : str = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) A_ : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Optional[Any] ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,) def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ): '''simple docstring''' A_ : Any = LlamaModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[Any] = model(_a ,attention_mask=_a ) A_ : Optional[int] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def 
_a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,): '''simple docstring''' A_ : List[str] = True A_ : Union[str, Any] = LlamaModel(_a ) model.to(_a ) model.eval() A_ : Tuple = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,) A_ : List[Any] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,) A_ : int = model(_a ,attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,): '''simple docstring''' A_ : List[Any] = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() A_ : Dict = model(_a ,attention_mask=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,): '''simple docstring''' A_ : Optional[Any] = True A_ : Any = True A_ : Tuple = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass A_ : Optional[int] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,) A_ : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 ) A_ : List[str] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0] A_ : Any = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0] # select random slice A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() A_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Any = config_and_inputs A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () a_ = (LlamaForCausalLM,) if is_torch_available() else () a_ = ( { """feature-extraction""": LlamaModel, """text-classification""": LlamaForSequenceClassification, """text-generation""": LlamaForCausalLM, """zero-shot""": LlamaForSequenceClassification, } if is_torch_available() else {} ) a_ = False a_ = False def _a ( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = LlamaModelTester(self ) A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 ) 
def _a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : Dict = type self.model_tester.create_and_check_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[str] = 3 A_ : Any = input_dict["""input_ids"""] A_ : Union[str, Any] = input_ids.ne(1 ).to(_a ) A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A_ : List[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : int = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Dict ): '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : str = 3 A_ : Union[str, Any] = """single_label_classification""" A_ : Union[str, Any] = input_dict["""input_ids"""] A_ : List[Any] = input_ids.ne(1 ).to(_a ) A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A_ : List[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = 3 A_ : Dict = """multi_label_classification""" A_ : Any = input_dict["""input_ids"""] A_ : Optional[Any] = input_ids.ne(1 ).to(_a ) A_ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A_ : Optional[int] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def _a ( self : Any ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _a ( self : Optional[Any] ,_a : List[Any] ): '''simple docstring''' A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size ) A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A_ : int = LlamaModel(_a ) original_model.to(_a ) original_model.eval() A_ : Tuple = original_model(_a ).last_hidden_state A_ : Union[str, Any] = original_model(_a ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0} A_ : int = LlamaModel(_a ) scaled_model.to(_a ) scaled_model.eval() A_ : List[Any] = scaled_model(_a ).last_hidden_state A_ : Any = scaled_model(_a ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # 
maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" ) A_ : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : str ): '''simple docstring''' A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" ) A_ : Tuple = model(torch.tensor(_a ) ) # Expected mean on dim = -1 A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" ) A_ : int = model(torch.tensor(_a ) ) # Expected mean on dim = -1 A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test""" ) @slow def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" ) A_ : Tuple = model(torch.tensor(_a ) ) A_ : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Model is curently gated""" ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" A_ : List[str] = """Simply put, the theory of relativity states that """ A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" ) A_ : List[str] = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a ) # greedy generation outputs A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a ) A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a ) self.assertEqual(_a ,_a )
665
1
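The parameterized test near the end of the record exercises rotary-embedding scaling through the config. A tiny illustration; the hyperparameters are arbitrary, and the rope_scaling dict format {"type", "factor"} matches the transformers versions this test targets (later releases renamed the keys):

from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=128,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=128,
    max_position_embeddings=64,
    rope_scaling={"type": "linear", "factor": 10.0},  # as in the test above
)
model = LlamaModel(config)  # linear scaling compresses position ids into the trained range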
'''simple docstring'''
from collections import defaultdict

from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst


def test_prim_successful_result():
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2],
        [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9],
        [5, 4, 10], [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for nodea, nodeb, cost in edges:
        adjacency[nodea].append([nodeb, cost])
        adjacency[nodeb].append([nodea, cost])
    result = mst(adjacency)
    expected = [
        [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7],
        [0, 7, 8], [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
665
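For reference, a compact lazy-deletion Prim's implementation that reproduces the expected tree in the test above. The imported prisms_algorithm itself is not shown in this dump, so this is an independent sketch of the technique, not its source:

import heapq

def prim_mst(adjacency: dict[int, list[list[int]]], start: int = 0) -> list[tuple[int, int]]:
    visited = {start}
    heap = [(cost, start, nxt) for nxt, cost in adjacency[start]]
    heapq.heapify(heap)
    mst_edges = []
    while heap and len(visited) < len(adjacency):
        cost, u, v = heapq.heappop(heap)
        if v in visited:  # stale entry: v was already reached more cheaply
            continue
        visited.add(v)
        mst_edges.append((u, v))
        for nxt, c in adjacency[v]:
            if nxt not in visited:
                heapq.heappush(heap, (c, v, nxt))
    return mst_edges

Fed the test's 9-node graph, this returns the eight expected edges (total weight 37) up to edge direction.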
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets __magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' __magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' __magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def _a ( self : Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ), """references""": datasets.Value("""string""" ), } ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,) def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = 0.0 for i, j in zip(_a ,_a ): n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0 A_ : List[str] = n_correct / len(_a ) return { "accuracy": accuracy, }
665
1
'''simple docstring''' import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import ( AutoProcessor, BertTokenizerFast, BlipImageProcessor, GPTaTokenizer, InstructBlipProcessor, PreTrainedTokenizerFast, ) @require_vision class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' def _a ( self : Optional[int] ): '''simple docstring''' A_ : Optional[int] = tempfile.mkdtemp() A_ : Optional[int] = BlipImageProcessor() A_ : Dict = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) A_ : Optional[Any] = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) A_ : Optional[Any] = InstructBlipProcessor(_a ,_a ,_a ) processor.save_pretrained(self.tmpdirname ) def _a ( self : Any ,**_a : Dict ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname ,**_a ).tokenizer def _a ( self : Tuple ,**_a : List[str] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname ,**_a ).image_processor def _a ( self : str ,**_a : Optional[int] ): '''simple docstring''' return AutoProcessor.from_pretrained(self.tmpdirname ,**_a ).qformer_tokenizer def _a ( self : str ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : str = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] A_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_a ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _a ( self : Optional[Any] ): '''simple docstring''' A_ : List[str] = InstructBlipProcessor( tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,) processor.save_pretrained(self.tmpdirname ) A_ : str = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" ) A_ : Optional[Any] = self.get_image_processor(do_normalize=_a ,padding_value=1.0 ) A_ : Optional[int] = InstructBlipProcessor.from_pretrained( self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_a ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,_a ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,_a ) self.assertIsInstance(processor.qformer_tokenizer ,_a ) def _a ( self : Dict ): '''simple docstring''' A_ : Optional[int] = self.get_image_processor() A_ : Optional[Any] = self.get_tokenizer() A_ : Optional[Any] = self.get_qformer_tokenizer() A_ : List[Any] = InstructBlipProcessor( tokenizer=_a ,image_processor=_a ,qformer_tokenizer=_a ) A_ : Dict = self.prepare_image_inputs() A_ : int = image_processor(_a ,return_tensors="""np""" ) A_ : Tuple = processor(images=_a ,return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _a ( self : List[str] ): '''simple docstring''' A_ : List[Any] = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Union[str, Any] = self.get_qformer_tokenizer() A_ : Any = InstructBlipProcessor( tokenizer=_a ,image_processor=_a ,qformer_tokenizer=_a ) A_ : Any = """lower newer""" A_ : int = processor(text=_a ) A_ : List[Any] = tokenizer(_a 
,return_token_type_ids=_a ) A_ : Optional[int] = qformer_tokenizer(_a ,return_token_type_ids=_a ) for key in encoded_tokens.keys(): self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] ) for key in encoded_tokens_qformer.keys(): self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor["""qformer_""" + key] ) def _a ( self : List[Any] ): '''simple docstring''' A_ : Any = self.get_image_processor() A_ : Optional[int] = self.get_tokenizer() A_ : List[Any] = self.get_qformer_tokenizer() A_ : Optional[int] = InstructBlipProcessor( tokenizer=_a ,image_processor=_a ,qformer_tokenizer=_a ) A_ : Optional[int] = """lower newer""" A_ : List[Any] = self.prepare_image_inputs() A_ : Any = processor(text=_a ,images=_a ) self.assertListEqual( list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] ,) # test if it raises when no input is passed with pytest.raises(_a ): processor() def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : List[str] = self.get_image_processor() A_ : Union[str, Any] = self.get_tokenizer() A_ : Optional[int] = self.get_qformer_tokenizer() A_ : List[Any] = InstructBlipProcessor( tokenizer=_a ,image_processor=_a ,qformer_tokenizer=_a ) A_ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A_ : str = processor.batch_decode(_a ) A_ : Optional[Any] = tokenizer.batch_decode(_a ) self.assertListEqual(_a ,_a ) def _a ( self : Tuple ): '''simple docstring''' A_ : str = self.get_image_processor() A_ : List[Any] = self.get_tokenizer() A_ : int = self.get_qformer_tokenizer() A_ : Optional[Any] = InstructBlipProcessor( tokenizer=_a ,image_processor=_a ,qformer_tokenizer=_a ) A_ : int = """lower newer""" A_ : List[Any] = self.prepare_image_inputs() A_ : Optional[int] = processor(text=_a ,images=_a ) self.assertListEqual( list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] ,)
'''simple docstring''' from ....configuration_utils import PretrainedConfig from ....utils import logging __magic_name__ = logging.get_logger(__name__) # TODO: upload to AWS __magic_name__ = { 'yjernite/retribert-base-uncased': ( 'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json' ), } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """retribert""" def __init__( self : int ,_a : Dict=30522 ,_a : List[Any]=768 ,_a : Optional[Any]=8 ,_a : str=12 ,_a : str=3072 ,_a : Tuple="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[Any]=512 ,_a : Union[str, Any]=2 ,_a : Tuple=0.02 ,_a : List[str]=1e-12 ,_a : Dict=True ,_a : Tuple=128 ,_a : Optional[int]=0 ,**_a : Tuple ,): '''simple docstring''' super().__init__(pad_token_id=_a ,**_a ) A_ : Dict = vocab_size A_ : int = hidden_size A_ : Union[str, Any] = num_hidden_layers A_ : Union[str, Any] = num_attention_heads A_ : Tuple = hidden_act A_ : int = intermediate_size A_ : Tuple = hidden_dropout_prob A_ : Optional[int] = attention_probs_dropout_prob A_ : int = max_position_embeddings A_ : Any = type_vocab_size A_ : Optional[int] = initializer_range A_ : Dict = layer_norm_eps A_ : str = share_encoders A_ : List[Any] = projection_dim
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __magic_name__ = {'configuration_encoder_decoder': ['EncoderDecoderConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['EncoderDecoderModel'] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['TFEncoderDecoderModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['FlaxEncoderDecoderModel'] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
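# The module above registers its symbols with transformers' _LazyModule so the
# torch/tf/flax backends are imported only when a symbol is first accessed. A
# minimal, framework-free sketch of the same pattern (PEP 562 module-level
# __getattr__); the submodule mapping below is illustrative.
import importlib

_LAZY_ATTRS = {"EncoderDecoderModel": ".modeling_encoder_decoder"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module(_LAZY_ATTRS[name], package=__package__)
        return getattr(submodule, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")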
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'spiece.model'} __magic_name__ = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } __magic_name__ = { 'google/bigbird-roberta-base': 4_096, 'google/bigbird-roberta-large': 4_096, 'google/bigbird-base-trivia-itc': 4_096, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = [] def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,): '''simple docstring''' A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,) A_ : Optional[int] = vocab_file A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) @property def _a ( self : Union[str, Any] ): '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = self.__dict__.copy() A_ : Union[str, Any] = None return state def __setstate__( self : List[Any] ,_a : Any ): '''simple docstring''' A_ : Tuple = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): A_ : Tuple = {} A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self : Union[str, Any] ,_a : str ): '''simple docstring''' return self.sp_model.encode(_a ,out_type=_a ) def _a ( self : Optional[int] ,_a : str ): '''simple docstring''' return self.sp_model.piece_to_id(_a ) def _a ( self : int ,_a : Optional[int] ): '''simple docstring''' A_ : List[str] = self.sp_model.IdToPiece(_a ) return token def _a ( self : Dict ,_a : int ): '''simple docstring''' A_ : int = [] A_ : Any = """""" A_ : str = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token A_ : Dict = True A_ : Union[str, Any] = [] else: current_sub_tokens.append(_a ) A_ : str = False out_string += self.sp_model.decode(_a ) return out_string.strip() def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,): '''simple docstring''' A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a ) A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A_ : str = [] A_ : int = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) A_ : List[str] = [] sub_texts.append(_a ) else: current_sub_text.append(_a ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) ) else: A_ : Tuple = """""".join(_a ) A_ : str = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A_ : Optional[Any] = self.clean_up_tokenization(_a ) return clean_text else: return text def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : int = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_a ) elif not os.path.isfile(self.vocab_file ): with open(_a ,"""wb""" ) as fi: A_ : str = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : List[Any] = [self.cls_token_id] A_ : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Tuple = [self.sep_token_id] A_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
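# Plain-python sketch of the special-token layout implemented by
# build_inputs_with_special_tokens above: [CLS] A [SEP] for single sequences and
# [CLS] A [SEP] B [SEP] for pairs (the ids below are illustrative).
CLS, SEP = 65, 66

def build_inputs(ids_a, ids_b=None):
    if ids_b is None:
        return [CLS] + ids_a + [SEP]
    return [CLS] + ids_a + [SEP] + ids_b + [SEP]

assert build_inputs([1, 2]) == [65, 1, 2, 66]
assert build_inputs([1], [2, 3]) == [65, 1, 66, 2, 3, 66]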
'''simple docstring''' import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""image_processor""", """tokenizer"""] a_ = """LayoutLMv3ImageProcessor""" a_ = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""") def __init__( self : Tuple ,_a : Any=None ,_a : Optional[Any]=None ,**_a : Optional[Any] ): '''simple docstring''' A_ : List[str] = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" ,_a ,) A_ : Dict = kwargs.pop("""feature_extractor""" ) A_ : Tuple = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""" ) if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""" ) super().__init__(_a ,_a ) def __call__( self : Any ,_a : Union[str, Any] ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None ,_a : Union[List[List[int]], List[List[List[int]]]] = None ,_a : Optional[Union[List[int], List[List[int]]]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : int ,): '''simple docstring''' if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( """You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.""" ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( """You cannot provide word labels if you initialized the image processor with apply_ocr set to True.""" ) # first, apply the image processor A_ : Dict = self.image_processor(images=_a ,return_tensors=_a ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(_a ,_a ): A_ : List[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) A_ : Any = features["""words"""] A_ : Union[str, Any] = self.tokenizer( text=text if text is not None else features["""words"""] ,text_pair=text_pair if text_pair is not None else None ,boxes=boxes if boxes is not None else features["""boxes"""] ,word_labels=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,) # add pixel values A_ : Tuple = features.pop("""pixel_values""" ) if return_overflowing_tokens is True: A_ : Union[str, Any] = self.get_overflowing_images(_a ,encoded_inputs["""overflow_to_sample_mapping"""] ) A_ : Optional[int] = images return encoded_inputs def _a ( self : Union[str, Any] ,_a : Dict ,_a : int ): '''simple docstring''' A_ : Any = [] for sample_idx in 
overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(_a ) != len(_a ): raise ValueError( """Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got""" f' {len(_a )} and {len(_a )}' ) return images_with_overflow def _a ( self : Dict ,*_a : Optional[Any] ,**_a : List[Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*_a ,**_a ) def _a ( self : Any ,*_a : int ,**_a : Any ): '''simple docstring''' return self.tokenizer.decode(*_a ,**_a ) @property def _a ( self : int ): '''simple docstring''' return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def _a ( self : Optional[Any] ): '''simple docstring''' warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,) return self.image_processor_class @property def _a ( self : Any ): '''simple docstring''' warnings.warn( """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,) return self.image_processor
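# Hedged usage sketch for the processor above, assuming the public
# LayoutLMv3Processor API; the checkpoint id, words, and boxes are illustrative.
# With apply_ocr=False the caller supplies the words and (0-1000 normalized)
# bounding boxes directly.
import numpy as np
from PIL import Image
from transformers import LayoutLMv3Processor

processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base", apply_ocr=False)
image = Image.fromarray(np.full((224, 224, 3), 255, dtype=np.uint8))
encoding = processor(image, ["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")
print(sorted(encoding.keys()))  # the model_input_names listed above
# ['attention_mask', 'bbox', 'input_ids', 'pixel_values']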
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ): '''simple docstring''' A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a ) return generator, ["Something to write", "Something else"] def _a ( self : str ,_a : Union[str, Any] ,_a : int ): '''simple docstring''' A_ : Any = generator("""Something there""" ) self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) A_ : List[str] = generator( ["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) with self.assertRaises(_a ): generator(4 ) @require_torch def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" ) # do_sample=False necessary for reproducibility A_ : Tuple = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] ) A_ : Optional[int] = 3 A_ : Tuple = generator( """Something there""" ,num_return_sequences=_a ,num_beams=_a ,) A_ : Optional[Any] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a ,_a ) A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a ) self.assertEqual( _a ,[ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] ,) A_ : Dict = generator.model.config.eos_token_id A_ : Optional[int] = """<pad>""" A_ : List[Any] = generator( ["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,) self.assertEqual( _a ,[ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] ,) @require_tf def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" ) # do_sample=False necessary for reproducibility A_ : Dict = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] )
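# Minimal usage sketch for the pipeline under test, reusing the same tiny
# checkpoint as the tests above; the generated contents are model-dependent.
from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
print(generator("Something there", do_sample=False))  # -> [{'generated_text': ...}]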
'''simple docstring''' import argparse from pathlib import Path from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Path , lowerCamelCase : str = None , lowerCamelCase : str = None , lowerCamelCase : str = None , ): if config_name_or_path is None: A_ : int = """facebook/rag-token-base""" if model_type == """rag_token""" else """facebook/rag-sequence-base""" if generator_tokenizer_name_or_path is None: A_ : List[str] = generator_name_or_path if question_encoder_tokenizer_name_or_path is None: A_ : Optional[Any] = question_encoder_name_or_path A_ : Tuple = RagTokenForGeneration if model_type == """rag_token""" else RagSequenceForGeneration # Save model. A_ : int = RagConfig.from_pretrained(lowerCamelCase) A_ : Dict = AutoConfig.from_pretrained(lowerCamelCase) A_ : int = AutoConfig.from_pretrained(lowerCamelCase) A_ : Any = gen_config A_ : Tuple = question_encoder_config A_ : Any = model_class.from_pretrained_question_encoder_generator( lowerCamelCase , lowerCamelCase , config=lowerCamelCase) rag_model.save_pretrained(lowerCamelCase) # Sanity check. model_class.from_pretrained(lowerCamelCase) # Save tokenizers. A_ : Tuple = AutoTokenizer.from_pretrained(lowerCamelCase) gen_tokenizer.save_pretrained(dest_dir / """generator_tokenizer/""") A_ : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase) question_encoder_tokenizer.save_pretrained(dest_dir / """question_encoder_tokenizer/""") if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument( '--model_type', choices=['rag_sequence', 'rag_token'], required=True, type=str, help='RAG model type: rag_sequence, rag_token', ) parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.') parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier') parser.add_argument( '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier' ) parser.add_argument( '--generator_tokenizer_name_or_path', type=str, help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``', ) parser.add_argument( '--question_encoder_tokenizer_name_or_path', type=str, help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``', ) parser.add_argument( '--config_name_or_path', type=str, help=( 'Identifier of the model config to use, if not provided, resolves to a base config for a given' ' ``model_type``' ), ) __magic_name__ = parser.parse_args() __magic_name__ = Path(args.dest) dest_dir.mkdir(exist_ok=True) consolidate( args.model_type, args.generator_name_or_path, args.question_encoder_name_or_path, dest_dir, args.config_name_or_path, args.generator_tokenizer_name_or_path, args.question_encoder_tokenizer_name_or_path, )
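# Illustrative command line for the consolidation script above, assembled from
# its own argparse flags; the script filename, model ids, and paths are
# placeholders.
#
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_token \
#       --generator_name_or_path facebook/bart-large \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-token-checkpoint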
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """gpt_bigcode""" a_ = ["""past_key_values"""] a_ = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,): '''simple docstring''' A_ : Optional[Any] = vocab_size A_ : int = n_positions A_ : Union[str, Any] = n_embd A_ : int = n_layer A_ : Optional[int] = n_head A_ : Union[str, Any] = n_inner A_ : List[Any] = activation_function A_ : Dict = resid_pdrop A_ : int = embd_pdrop A_ : Optional[int] = attn_pdrop A_ : Union[str, Any] = layer_norm_epsilon A_ : int = initializer_range A_ : Union[str, Any] = scale_attn_weights A_ : List[str] = use_cache A_ : Tuple = attention_softmax_in_fpaa A_ : List[str] = scale_attention_softmax_in_fpaa A_ : Union[str, Any] = multi_query A_ : Any = bos_token_id A_ : Optional[int] = eos_token_id super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
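# Sketch of the attribute_map aliasing declared above: PretrainedConfig lets
# canonical names (hidden_size, num_hidden_layers, ...) resolve to the GPT-style
# fields (n_embd, n_layer, ...). A minimal re-implementation for illustration:
class AliasedConfig:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        target = type(self).attribute_map.get(name)
        if target is not None:
            return getattr(self, target)
        raise AttributeError(name)

cfg = AliasedConfig()
assert cfg.hidden_size == cfg.n_embd == 768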
'''simple docstring''' import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration __magic_name__ = [ # tf -> hf ('/', '.'), ('layer_', 'layers.'), ('kernel', 'weight'), ('beta', 'bias'), ('gamma', 'weight'), ('pegasus', 'model'), ] __magic_name__ = [ ('.output.dense', '.fc2'), ('intermediate.LayerNorm', 'final_layer_norm'), ('intermediate.dense', 'fc1'), ] __magic_name__ = ( INIT_COMMON + [ ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.out_proj'), ('attention.self', 'self_attn'), ('attention.encdec.LayerNorm', 'encoder_attn_layer_norm'), ('attention.encdec_output.dense', 'encoder_attn.out_proj'), ('attention.encdec', 'encoder_attn'), ('key', 'k_proj'), ('value', 'v_proj'), ('query', 'q_proj'), ('decoder.LayerNorm', 'decoder.layernorm_embedding'), ] + END_COMMON ) __magic_name__ = ( INIT_COMMON + [ ('embeddings.word_embeddings', 'shared.weight'), ('embeddings.position_embeddings', 'embed_positions.weight'), ('attention.self.LayerNorm', 'self_attn_layer_norm'), ('attention.output.dense', 'self_attn.output'), ('attention.self', 'self_attn.self'), ('encoder.LayerNorm', 'encoder.layernorm_embedding'), ] + END_COMMON ) __magic_name__ = [ 'encdec/key/bias', 'encdec/query/bias', 'encdec/value/bias', 'self/key/bias', 'self/query/bias', 'self/value/bias', 'encdec_output/dense/bias', 'attention/output/dense/bias', ] def lowerCamelCase ( lowerCamelCase : List[Any] , lowerCamelCase : Dict): for tf_name, hf_name in patterns: A_ : Tuple = k.replace(lowerCamelCase , lowerCamelCase) return k def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : dict): A_ : Optional[int] = BigBirdPegasusConfig(**lowerCamelCase) A_ : Optional[Any] = BigBirdPegasusForConditionalGeneration(lowerCamelCase) A_ : str = torch_model.state_dict() A_ : Optional[Any] = {} # separating decoder weights A_ : str = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""")} A_ : Union[str, Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""")} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion"""): A_ : List[str] = [k.endswith(lowerCamelCase) for ending in KEYS_TO_IGNORE] if any(lowerCamelCase): continue A_ : Optional[Any] = DECODER_PATTERNS A_ : int = rename_state_dict_key(lowerCamelCase , lowerCamelCase) if new_k not in state_dict: raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})') if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""]): A_ : int = v.T A_ : Union[str, Any] = torch.from_numpy(lowerCamelCase) assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion"""): A_ : List[Any] = [k.endswith(lowerCamelCase) for ending in KEYS_TO_IGNORE] if any(lowerCamelCase): continue A_ : Optional[int] = REMAINING_PATTERNS A_ : Any = rename_state_dict_key(lowerCamelCase , lowerCamelCase) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F'could not find new key {new_k} in state dict. 
(converted from {k})') if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""]): A_ : Optional[Any] = v.T A_ : int = torch.from_numpy(lowerCamelCase) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}' A_ : int = mapping["""model.embed_positions.weight"""] A_ : Union[str, Any] = mapping.pop("""model.embed_positions.weight""") A_ , A_ : str = torch_model.load_state_dict(lowerCamelCase , strict=lowerCamelCase) A_ : List[Any] = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}' assert extra == [], F'no matches found for the following tf keys {extra}' return torch_model def lowerCamelCase ( lowerCamelCase : List[str]): A_ : Any = tf.train.list_variables(lowerCamelCase) A_ : Optional[int] = {} A_ : Dict = ["""global_step"""] for name, shape in tqdm(lowerCamelCase , desc="""converting tf checkpoint to dict"""): A_ : List[str] = any(pat in name for pat in ignore_name) if skip_key: continue A_ : List[Any] = tf.train.load_variable(lowerCamelCase , lowerCamelCase) A_ : List[Any] = array return tf_weights def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : dict): A_ : List[Any] = get_tf_weights_as_numpy(lowerCamelCase) A_ : int = convert_bigbird_pegasus(lowerCamelCase , lowerCamelCase) torch_model.save_pretrained(lowerCamelCase) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() parser.add_argument('--tf_ckpt_path', type=str, help='passed to tf.train.list_variables') parser.add_argument('--save_dir', default=None, type=str, help='Path to the output PyTorch model.') __magic_name__ = parser.parse_args() __magic_name__ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
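# Standalone sketch of the key-renaming helper defined at the top of the script
# above: (tf_pattern, hf_pattern) pairs are applied in order with str.replace.
def rename_key(key, patterns):
    for tf_name, hf_name in patterns:
        key = key.replace(tf_name, hf_name)
    return key

patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
assert rename_key("encoder/layer_0/kernel", patterns) == "encoder.layers.0.weight"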
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} __magic_name__ = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } __magic_name__ = { 'allenai/longformer-base-4096': 4_096, 'allenai/longformer-large-4096': 4_096, 'allenai/longformer-large-4096-finetuned-triviaqa': 4_096, 'allenai/longformer-base-4096-extra.pos.embd.only': 4_096, 'allenai/longformer-large-4096-extra.pos.embd.only': 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCamelCase ( ): A_ : Union[str, Any] = ( list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1)) ) A_ : Optional[Any] = bs[:] A_ : List[str] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase) cs.append(2**8 + n) n += 1 A_ : List[Any] = [chr(lowerCamelCase) for n in cs] return dict(zip(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int): A_ : int = set() A_ : int = word[0] for char in word[1:]: pairs.add((prev_char, char)) A_ : List[str] = char return pairs class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,): '''simple docstring''' A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if 
isinstance(_a ,_a ) else sep_token A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token super().__init__( errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,) with open(_a ,encoding="""utf-8""" ) as vocab_handle: A_ : str = json.load(_a ) A_ : Optional[int] = {v: k for k, v in self.encoder.items()} A_ : List[str] = errors # how to handle errors in decoding A_ : List[str] = bytes_to_unicode() A_ : str = {v: k for k, v in self.byte_encoder.items()} with open(_a ,encoding="""utf-8""" ) as merges_handle: A_ : Any = merges_handle.read().split("""\n""" )[1:-1] A_ : str = [tuple(merge.split() ) for merge in bpe_merges] A_ : int = dict(zip(_a ,range(len(_a ) ) ) ) A_ : List[Any] = {} A_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def _a ( self : Any ): '''simple docstring''' return len(self.encoder ) def _a ( self : str ): '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def _a ( self : int ,_a : int ): '''simple docstring''' if token in self.cache: return self.cache[token] A_ : Optional[int] = tuple(_a ) A_ : Any = get_pairs(_a ) if not pairs: return token while True: A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A_ , A_ : Dict = bigram A_ : int = [] A_ : Optional[Any] = 0 while i < len(_a ): try: A_ : List[str] = word.index(_a ,_a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A_ : Tuple = j if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A_ : str = tuple(_a ) A_ : str = new_word if len(_a ) == 1: break else: A_ : int = get_pairs(_a ) A_ : Optional[int] = """ """.join(_a ) A_ : List[str] = word return word def _a ( self : Dict ,_a : Optional[int] ): '''simple docstring''' A_ : Any = [] for token in re.findall(self.pat ,_a ): A_ : Any = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) ) return bpe_tokens def _a ( self : Union[str, Any] ,_a : Optional[int] ): '''simple docstring''' return self.encoder.get(_a ,self.encoder.get(self.unk_token ) ) def _a ( self : int ,_a : Dict ): '''simple docstring''' return self.decoder.get(_a ) def _a ( self : Optional[int] ,_a : List[Any] ): '''simple docstring''' A_ : Optional[int] = """""".join(_a ) A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : int = os.path.join( _a ,(filename_prefix + 
"""-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : int = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(_a ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" ) A_ : int = 0 with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) A_ : Dict = token_index writer.write(""" """.join(_a ) + """\n""" ) index += 1 return vocab_file, merge_file def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : int = [self.cls_token_id] A_ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1] def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Union[str, Any] = [self.sep_token_id] A_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ): '''simple docstring''' A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()): A_ : Optional[int] = """ """ + text return (text, kwargs)
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)


def __lowercase(snake_case):
    """Removes the pegasus newline token and puts each sentence on its own line."""
    # bind the result: re.sub returns a new string rather than editing in place
    snake_case = re.sub("<n>", "", snake_case)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(snake_case))
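# Expected behaviour of the helper above (punkt sentence segmentation joined
# with newlines); the sample text is illustrative.
import nltk

nltk.download("punkt", quiet=True)
print("\n".join(nltk.sent_tokenize("Hello world. How are you?")))
# Hello world.
# How are you?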
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.txt'} __magic_name__ = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __magic_name__ = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __magic_name__ = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_INIT_CONFIGURATION a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ConvBertTokenizer def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,): '''simple docstring''' super().__init__( _a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,) A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_a ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars ): A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) ) A_ : str = do_lower_case A_ : Any = strip_accents A_ : int = tokenize_chinese_chars A_ : Tuple = normalizer_class(**_a ) A_ : Any = do_lower_case def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ): '''simple docstring''' A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : int = [self.sep_token_id] A_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a ) return tuple(_a )
import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class __lowerCamelCase (_a ): _lowercase = ["""image_processor""", """tokenizer"""] _lowercase = """CLIPImageProcessor""" _lowercase = ("""CLIPTokenizer""", """CLIPTokenizerFast""") def __init__( self: Tuple,A_: Dict=None,A_: Dict=None,**A_: str ): '''simple docstring''' __UpperCamelCase = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.',A_,) __UpperCamelCase = kwargs.pop('feature_extractor' ) __UpperCamelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(A_,A_ ) def __call__( self: Optional[int],A_: Union[str, Any]=None,A_: int=None,A_: List[Any]=None,**A_: Optional[Any] ): '''simple docstring''' if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: __UpperCamelCase = self.tokenizer(A_,return_tensors=A_,**A_ ) if images is not None: __UpperCamelCase = self.image_processor(A_,return_tensors=A_,**A_ ) if text is not None and images is not None: __UpperCamelCase = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**A_ ),tensor_type=A_ ) def snake_case_ ( self: Optional[int],*A_: Tuple,**A_: Union[str, Any] ): '''simple docstring''' return self.tokenizer.batch_decode(*A_,**A_ ) def snake_case_ ( self: Dict,*A_: int,**A_: List[Any] ): '''simple docstring''' return self.tokenizer.decode(*A_,**A_ ) @property def snake_case_ ( self: Any ): '''simple docstring''' __UpperCamelCase = self.tokenizer.model_input_names __UpperCamelCase = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) @property def snake_case_ ( self: Union[str, Any] ): '''simple docstring''' warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',A_,) return self.image_processor_class @property def snake_case_ ( self: List[str] ): '''simple docstring''' warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',A_,) return self.image_processor
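# Hedged usage sketch for the processor above, assuming the public CLIPProcessor
# API; the checkpoint id, caption, and dummy image are illustrative.
import numpy as np
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']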
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart __magic_name__ = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } __magic_name__ = { 'facebook/bart-base': 1_024, 'facebook/bart-large': 1_024, 'facebook/bart-large-mnli': 1_024, 'facebook/bart-large-cnn': 1_024, 'facebook/bart-large-xsum': 1_024, 'yjernite/bart_eli5': 1_024, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = BartTokenizer def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,): '''simple docstring''' super().__init__( _a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,) A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space: A_ : 
List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**_a ) A_ : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A_ : str = """post_processor""" A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a ) if tokenizer_component_instance: A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : Tuple = tuple(state["""sep"""] ) if "cls" in state: A_ : Tuple = tuple(state["""cls"""] ) A_ : List[str] = False if state.get("""add_prefix_space""" ,_a ) != add_prefix_space: A_ : Dict = add_prefix_space A_ : Any = True if state.get("""trim_offsets""" ,_a ) != trim_offsets: A_ : Union[str, Any] = trim_offsets A_ : List[Any] = True if changes_to_apply: A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) ) A_ : Tuple = component_class(**_a ) setattr(self.backend_tokenizer ,_a ,_a ) @property def _a ( self : List[str] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _a ( self : Union[str, Any] ,_a : Any ): '''simple docstring''' A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value A_ : List[Any] = value def _a ( self : str ,*_a : str ,**_a : Optional[int] ): '''simple docstring''' A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*_a ,**_a ) def _a ( self : str ,*_a : List[Any] ,**_a : str ): '''simple docstring''' A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*_a ,**_a ) def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(_a ,name=_a ) return tuple(_a ) def _a ( self : str ,_a : Optional[int] ,_a : int=None ): '''simple docstring''' A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Dict = [self.sep_token_id] A_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
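# Plain-python sketch of the BART special-token layout implemented by
# build_inputs_with_special_tokens above: <s> A </s> for single sequences and
# <s> A </s></s> B </s> for pairs (0 and 2 are BART's usual bos/eos ids).
BOS, EOS = 0, 2

def build_inputs(ids_a, ids_b=None):
    output = [BOS] + ids_a + [EOS]
    if ids_b is None:
        return output
    return output + [EOS] + ids_b + [EOS]

assert build_inputs([7]) == [0, 7, 2]
assert build_inputs([7], [8]) == [0, 7, 2, 2, 8, 2]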
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = { """microsoft/unispeech-sat-base-100h-libri-ft""": ( """https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json""" ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class lowerCamelCase__ ( _A): """simple docstring""" a__ : Union[str, Any] = "unispeech-sat" def __init__( self : Tuple , __lowerCAmelCase : Tuple=32 , __lowerCAmelCase : List[Any]=7_68 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Optional[Any]=12 , __lowerCAmelCase : Dict=30_72 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : int=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Dict=0.1 , __lowerCAmelCase : Optional[int]=0.1 , __lowerCAmelCase : Optional[Any]=0.02 , __lowerCAmelCase : Any=1E-5 , __lowerCAmelCase : List[Any]="group" , __lowerCAmelCase : Optional[Any]="gelu" , __lowerCAmelCase : List[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __lowerCAmelCase : Any=(5, 2, 2, 2, 2, 2, 2) , __lowerCAmelCase : int=(10, 3, 3, 3, 3, 2, 2) , __lowerCAmelCase : List[str]=False , __lowerCAmelCase : int=1_28 , __lowerCAmelCase : Tuple=16 , __lowerCAmelCase : int=False , __lowerCAmelCase : int=True , __lowerCAmelCase : Union[str, Any]=0.05 , __lowerCAmelCase : str=10 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Any=10 , __lowerCAmelCase : str=0 , __lowerCAmelCase : List[str]=3_20 , __lowerCAmelCase : List[Any]=2 , __lowerCAmelCase : Optional[Any]=0.1 , __lowerCAmelCase : Dict=1_00 , __lowerCAmelCase : Optional[int]=2_56 , __lowerCAmelCase : List[Any]=2_56 , __lowerCAmelCase : Any=0.1 , __lowerCAmelCase : List[Any]="mean" , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[Any]=False , __lowerCAmelCase : Union[str, Any]=2_56 , __lowerCAmelCase : int=(5_12, 5_12, 5_12, 5_12, 15_00) , __lowerCAmelCase : Optional[Any]=(5, 3, 3, 1, 1) , __lowerCAmelCase : str=(1, 2, 3, 1, 1) , __lowerCAmelCase : Union[str, Any]=5_12 , __lowerCAmelCase : List[str]=0 , __lowerCAmelCase : Dict=1 , __lowerCAmelCase : List[str]=2 , __lowerCAmelCase : Dict=5_04 , **__lowerCAmelCase : Optional[Any] , ) -> int: super().__init__(**__lowerCAmelCase , pad_token_id=__lowerCAmelCase , bos_token_id=__lowerCAmelCase , eos_token_id=__lowerCAmelCase ) _A = hidden_size _A = feat_extract_norm _A = feat_extract_activation _A = list(__lowerCAmelCase ) _A = list(__lowerCAmelCase ) _A = list(__lowerCAmelCase ) _A = conv_bias _A = num_conv_pos_embeddings _A = num_conv_pos_embedding_groups _A = len(self.conv_dim ) _A = num_hidden_layers _A = intermediate_size _A = hidden_act _A = num_attention_heads _A = hidden_dropout _A = attention_dropout _A = activation_dropout _A = feat_proj_dropout _A = final_dropout _A = layerdrop _A = layer_norm_eps _A = initializer_range _A = vocab_size _A = num_clusters _A = do_stable_layer_norm _A = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,''' f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _A = apply_spec_augment _A = mask_time_prob _A = mask_time_length _A = mask_time_min_masks _A = mask_feature_prob _A = mask_feature_length _A = mask_feature_min_masks # parameters for pretraining with codevector quantized representations _A = num_codevectors_per_group _A = num_codevector_groups _A = contrastive_logits_temperature _A = feat_quantizer_dropout _A = num_negatives _A = codevector_dim _A = proj_codevector_dim _A = diversity_loss_weight # ctc loss _A = ctc_loss_reduction _A = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. _A = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _A = list(__lowerCAmelCase ) _A = list(__lowerCAmelCase ) _A = list(__lowerCAmelCase ) _A = xvector_output_dim @property def snake_case_ ( self : Tuple ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
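# The final property above multiplies the conv strides together; with this
# config's default conv_stride of (5, 2, 2, 2, 2, 2, 2) the feature extractor
# downsamples the input waveform by a factor of 320.
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 2, 2, 2, 2, 2), 1) == 320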
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file A_ : int = TapasConfig.from_json_file(lowerCamelCase) # set absolute/relative position embeddings parameter A_ : List[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "WTQ": # run_task_main.py hparams A_ : Tuple = 4 A_ : Optional[Any] = True # hparam_utils.py hparams A_ : Any = 0.66_4694 A_ : str = 0.20_7951 A_ : Any = 0.12_1194 A_ : str = True A_ : Dict = True A_ : int = False A_ : int = 0.035_2513 A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams A_ : int = 4 A_ : Union[str, Any] = False # hparam_utils.py hparams A_ : Dict = 36.4519 A_ : List[Any] = 0.90_3421 A_ : Any = 222.088 A_ : Optional[Any] = True A_ : Optional[int] = True A_ : Optional[Any] = True A_ : Optional[int] = 0.76_3141 A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "TABFACT": A_ : Any = TapasForSequenceClassification(config=lowerCamelCase) elif task == "MLM": A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase) elif task == "INTERMEDIATE_PRETRAINING": A_ : Union[str, Any] = TapasModel(config=lowerCamelCase) else: raise ValueError(F'Task {task} not supported.') print(F'Building PyTorch model from configuration: {config}') # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Save pytorch-model (weights and configuration) print(F'Save PyTorch model to {pytorch_dump_path}') model.save_pretrained(lowerCamelCase) # Save tokenizer files print(F'Save tokenizer files to {pytorch_dump_path}') A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512) tokenizer.save_pretrained(lowerCamelCase) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.' ) parser.add_argument( '--reset_position_index_per_cell', default=False, action='store_true', help='Whether to use relative position embeddings or not. Defaults to True.', ) parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--tapas_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained TAPAS model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) __magic_name__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
665
0
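The config fragment at the top of this section ends with a property that collapses the convolutional strides into a single downsampling factor via functools.reduce(operator.mul, ...). A standalone illustration of what that product means; the stride tuple below is a typical Wav2Vec2-style feature-encoder layout, used here only as an assumed example:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # per-layer strides of a hypothetical feature encoder

# Total downsampling: the product of all per-layer strides
inputs_to_logits_ratio = functools.reduce(operator.mul, conv_stride, 1)
print(inputs_to_logits_ratio)  # 320 -> one output frame per 320 input samples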
'''simple docstring''' from manim import * class SCREAMING_SNAKE_CASE__ ( snake_case_): def UpperCAmelCase_ ( self )-> str: '''simple docstring''' UpperCamelCase = Rectangle(height=0.5 , width=0.5 ) UpperCamelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCamelCase = [mem.copy() for i in range(6 )] UpperCamelCase = [mem.copy() for i in range(6 )] UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 ) UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 ) UpperCamelCase = VGroup(A_ , A_ ).arrange(A_ , buff=0 ) UpperCamelCase = Text('CPU' , font_size=24 ) UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(A_ ) UpperCamelCase = [mem.copy() for i in range(4 )] UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 ) UpperCamelCase = Text('GPU' , font_size=24 ) UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) gpu.move_to([-1, -1, 0] ) self.add(A_ ) UpperCamelCase = [mem.copy() for i in range(6 )] UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 ) UpperCamelCase = Text('Model' , font_size=24 ) UpperCamelCase = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ ) model.move_to([3, -1.0, 0] ) self.add(A_ ) UpperCamelCase = [] for i, rect in enumerate(A_ ): rect.set_stroke(A_ ) # target = fill.copy().set_fill(YELLOW, opacity=0.7) # target.move_to(rect) # self.add(target) UpperCamelCase = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(cpu_targs[0] , direction=A_ , buff=0.0 ) else: cpu_target.next_to(cpu_targs[i - 1] , direction=A_ , buff=0.0 ) self.add(A_ ) cpu_targs.append(A_ ) UpperCamelCase = [mem.copy() for i in range(6 )] UpperCamelCase = VGroup(*A_ ).arrange(A_ , buff=0 ) UpperCamelCase = Text('Loaded Checkpoint' , font_size=24 ) UpperCamelCase = Group(A_ , A_ ).arrange(A_ , aligned_edge=A_ , buff=0.4 ) checkpoint.move_to([3, 0.5, 0] ) UpperCamelCase = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCamelCase = MarkupText( F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(A_ , A_ ) UpperCamelCase = MarkupText( F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , ) blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) UpperCamelCase = MarkupText( F'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , ) step_a.move_to([2, 2, 0] ) self.play(Write(A_ ) , Write(A_ ) ) self.play(Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) ) UpperCamelCase = [] UpperCamelCase = [] for i, rect in enumerate(A_ ): UpperCamelCase = fill.copy().set_fill(A_ , opacity=0.7 ) target.move_to(A_ ) first_animations.append(GrowFromCenter(A_ , run_time=1 ) ) UpperCamelCase = target.copy() cpu_target.generate_target() if i < 5: cpu_target.target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.target.move_to(cpu_right_col_base[i - 5] ) second_animations.append(MoveToTarget(A_ , run_time=1.5 ) ) self.play(*A_ ) self.play(*A_ ) self.wait()
3
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""vqvae"""] def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,): '''simple docstring''' super().__init__() self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a ) def _a ( self : str ): '''simple docstring''' return 50 if isinstance(self.scheduler ,_a ) else 1000 @torch.no_grad() def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,): '''simple docstring''' A_ : List[str] = steps or self.get_default_steps() self.scheduler.set_timesteps(_a ) A_ : Union[str, Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: A_ : int = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=_a ,device=self.device ,) A_ : List[Any] = noise A_ : str = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_a ,_a ) A_ : Any = self.mel.audio_slice_to_image(_a ) A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) A_ : Optional[Any] = (input_image / 255) * 2 - 1 A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample( generator=_a )[0] A_ : List[str] = self.vqvae.config.scaling_factor * input_images if start_step > 0: A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] ) A_ : Tuple = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) A_ : Tuple = int(mask_start_secs * pixels_per_second ) A_ : str = int(mask_end_secs * pixels_per_second ) A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet ,_a ): A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""] else: A_ : List[Any] = self.unet(_a ,_a )["""sample"""] if isinstance(self.scheduler ,_a ): A_ : Dict = self.scheduler.step( model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""] else: A_ : Any = self.scheduler.step( model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""] if mask is not None: if mask_start > 0: A_ : Tuple = mask[:, step, :, :mask_start] if mask_end > 0: A_ : List[str] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance A_ : str = 1 / self.vqvae.config.scaling_factor * images 
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""] A_ : int = (images / 2 + 0.5).clamp(0 ,1 ) A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() A_ : Optional[int] = (images * 255).round().astype("""uint8""" ) A_ : List[Any] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) ) A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) ) @torch.no_grad() def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler ,_a ) self.scheduler.set_timesteps(_a ) A_ : Optional[Any] = np.array( [np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) A_ : List[str] = (sample / 255) * 2 - 1 A_ : Optional[int] = torch.Tensor(_a ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps A_ : Any = self.scheduler.alphas_cumprod[t] A_ : List[Any] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) A_ : str = 1 - alpha_prod_t A_ : List[str] = self.unet(_a ,_a )["""sample"""] A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ): '''simple docstring''' A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) ) return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
665
0
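The pipeline in the row above closes with a static slerp helper: spherical linear interpolation between two noise tensors, used to blend latents. A minimal sketch of the same formula against plain torch (function and variable names are mine, and the cosine is clamped for numerical safety; this assumes the two tensors are not parallel, so sin(theta) is nonzero):

import math

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Interpolate along the great circle between the two tensors, treated as flat vectors
    cos_theta = torch.dot(torch.flatten(x0), torch.flatten(x1)) / (torch.norm(x0) * torch.norm(x1))
    theta = math.acos(float(torch.clamp(cos_theta, -1.0, 1.0)))
    return (math.sin((1 - alpha) * theta) * x0 + math.sin(alpha * theta) * x1) / math.sin(theta)


a, b = torch.randn(4), torch.randn(4)
print(slerp(a, b, 0.5))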
"""simple docstring""" import os from pathlib import Path from unittest.mock import patch import pytest import zstandard as zstd from datasets.download.download_config import DownloadConfig from datasets.utils.file_utils import ( OfflineModeIsEnabled, cached_path, fsspec_get, fsspec_head, ftp_get, ftp_head, get_from_cache, http_get, http_head, ) __UpperCamelCase : Union[str, Any] = '''\ Text data. Second line of data.''' __UpperCamelCase : Tuple = '''file''' @pytest.fixture(scope='session' ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ): lowerCAmelCase = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd') lowerCAmelCase = bytes(_UpperCAmelCase , 'utf-8' ) with zstd.open(_UpperCAmelCase , 'wb' ) as f: f.write(_UpperCAmelCase ) return path @pytest.fixture def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str ): with open(os.path.join(tmpfs.local_root_dir , _UpperCAmelCase ) , 'w' ) as f: f.write(_UpperCAmelCase ) return FILE_PATH @pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] ): lowerCAmelCase = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path} lowerCAmelCase = input_paths[compression_format] lowerCAmelCase = tmp_path / 'cache' lowerCAmelCase = DownloadConfig(cache_dir=_UpperCAmelCase , extract_compressed_file=_UpperCAmelCase ) lowerCAmelCase = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) with open(_UpperCAmelCase ) as f: lowerCAmelCase = f.read() with open(_UpperCAmelCase ) as f: lowerCAmelCase = f.read() assert extracted_file_content == expected_file_content @pytest.mark.parametrize('default_extracted' , [True, False] ) @pytest.mark.parametrize('default_cache_dir' , [True, False] ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : str ): lowerCAmelCase = 'custom_cache' lowerCAmelCase = 'custom_extracted_dir' lowerCAmelCase = tmp_path / 'custom_extracted_path' if default_extracted: lowerCAmelCase = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted') else: monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , _UpperCAmelCase ) monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_UpperCAmelCase ) ) lowerCAmelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir) lowerCAmelCase = xz_file lowerCAmelCase = ( DownloadConfig(extract_compressed_file=_UpperCAmelCase ) if default_cache_dir else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_UpperCAmelCase ) ) lowerCAmelCase = cached_path(_UpperCAmelCase , download_config=_UpperCAmelCase ) assert Path(_UpperCAmelCase ).parent.parts[-2:] == expected def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Union[str, Any] ): # absolute path lowerCAmelCase = str(Path(_UpperCAmelCase ).resolve() ) assert cached_path(_UpperCAmelCase ) == text_file # relative path lowerCAmelCase = str(Path(_UpperCAmelCase ).resolve().relative_to(Path(os.getcwd() ) ) ) assert cached_path(_UpperCAmelCase ) == text_file def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ): # absolute path lowerCAmelCase = str(tmp_path.resolve() / '__missing_file__.txt' ) with pytest.raises(_UpperCAmelCase ): cached_path(_UpperCAmelCase ) # relative path lowerCAmelCase = './__missing_file__.txt' with pytest.raises(_UpperCAmelCase 
): cached_path(_UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : int ): lowerCAmelCase = get_from_cache(F'tmp://{tmpfs_file}' ) with open(_UpperCAmelCase ) as f: lowerCAmelCase = f.read() assert output_file_content == FILE_CONTENT @patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (): with pytest.raises(_UpperCAmelCase ): cached_path('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[Any] ): lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_UpperCAmelCase ): http_get('https://huggingface.co' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): http_head('https://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Dict ): lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_UpperCAmelCase ): ftp_get('ftp://huggingface.co' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): ftp_head('ftp://huggingface.co' ) @patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCAmelCase ) def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] ): lowerCAmelCase = tmp_path_factory.mktemp('data' ) / 'file.html' with pytest.raises(_UpperCAmelCase ): fsspec_get('s3://huggingface.co' , temp_file=_UpperCAmelCase ) with pytest.raises(_UpperCAmelCase ): fsspec_head('s3://huggingface.co' )
4
'''simple docstring''' import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __magic_name__ = 16 __magic_name__ = 32 def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16): A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""") A_ : str = load_dataset("""glue""" , """mrpc""") def tokenize_function(lowerCamelCase : Dict): # max_length=None => use the model max length (it's actually the default) A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Tuple = datasets.map( lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""") def collate_fn(lowerCamelCase : Tuple): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : List[Any] = 16 elif accelerator.mixed_precision != "no": A_ : Any = 8 else: A_ : Tuple = None return tokenizer.pad( lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
A_ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase) A_ : str = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , ) return train_dataloader, eval_dataloader def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict): # Initialize accelerator A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : List[Any] = config["""lr"""] A_ : List[Any] = int(config["""num_epochs"""]) A_ : int = int(config["""seed"""]) A_ : Dict = int(config["""batch_size"""]) A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""") # If the batch size is too big we use gradient accumulation A_ : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : Any = batch_size // MAX_GPU_BATCH_SIZE A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase) A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : str = model.to(accelerator.device) # Instantiate optimizer A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase) # Instantiate scheduler A_ : Tuple = get_linear_schedule_with_warmup( optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) # Now we train the model for epoch in range(lowerCamelCase): model.train() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) A_ : Optional[int] = model(**lowerCamelCase) A_ : List[Any] = outputs.loss A_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): A_ : Union[str, Any] = model(**lowerCamelCase) A_ : Any = outputs.logits.argmax(dim=-1) A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""])) metric.add_batch( predictions=lowerCamelCase , references=lowerCamelCase , ) A_ : int = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(F'epoch {epoch}:' , lowerCamelCase) def lowerCamelCase ( ): A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""") parser.add_argument( """--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""") A_ : Dict = parser.parse_args() A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase , lowerCamelCase) if __name__ == "__main__": main()
665
0
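The training loop in the row above divides the loss by gradient_accumulation_steps and only steps the optimizer every few batches, so a small per-device batch emulates a larger effective one. A stripped-down sketch of the pattern in plain PyTorch (model and data are placeholders; I step on (step + 1) % k == 0, a common variant of the step % k == 0 check used above):

import torch

model = torch.nn.Linear(10, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
loss_fn = torch.nn.CrossEntropyLoss()
gradient_accumulation_steps = 4

batches = [(torch.randn(8, 10), torch.randint(0, 2, (8,))) for _ in range(8)]  # stands in for a dataloader

for step, (x, y) in enumerate(batches):
    loss = loss_fn(model(x), y) / gradient_accumulation_steps  # scale so accumulated grads average out
    loss.backward()  # gradients accumulate in .grad across calls
    if (step + 1) % gradient_accumulation_steps == 0:
        optimizer.step()
        optimizer.zero_grad()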
'''simple docstring''' import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler _lowercase = 16 _lowercase = 32 def A (__lowerCamelCase :Accelerator , __lowerCamelCase :int = 16 , __lowerCamelCase :str = "bert-base-cased" ): _lowerCAmelCase = AutoTokenizer.from_pretrained(__lowerCamelCase ) _lowerCAmelCase = load_dataset("""glue""" , """mrpc""" ) def tokenize_function(__lowerCamelCase :int ): # max_length=None => use the model max length (it's actually the default) _lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__lowerCamelCase , max_length=__lowerCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset _lowerCAmelCase = datasets.map( __lowerCamelCase , batched=__lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=__lowerCamelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__lowerCamelCase :List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(__lowerCamelCase , padding="""max_length""" , max_length=128 , return_tensors="""pt""" ) return tokenizer.pad(__lowerCamelCase , padding="""longest""" , return_tensors="""pt""" ) # Instantiate dataloaders. _lowerCAmelCase = DataLoader( tokenized_datasets["""train"""] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) _lowerCAmelCase = DataLoader( tokenized_datasets["""validation"""] , shuffle=__lowerCamelCase , collate_fn=__lowerCamelCase , batch_size=__lowerCamelCase ) return train_dataloader, eval_dataloader def A (__lowerCamelCase :Any , __lowerCamelCase :Optional[int] , __lowerCamelCase :Tuple , __lowerCamelCase :Union[str, Any] ): model.eval() _lowerCAmelCase = 0 for step, batch in enumerate(__lowerCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): _lowerCAmelCase = model(**__lowerCamelCase ) _lowerCAmelCase = outputs.logits.argmax(dim=-1 ) # It is slightly faster to call this once, than multiple times _lowerCAmelCase , _lowerCAmelCase = accelerator.gather( (predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.use_distributed: if step == len(__lowerCamelCase ) - 1: _lowerCAmelCase = predictions[: len(eval_dataloader.dataset ) - samples_seen] _lowerCAmelCase = references[: len(eval_dataloader.dataset ) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=__lowerCamelCase , references=__lowerCamelCase , ) _lowerCAmelCase = metric.compute() return eval_metric["accuracy"] def A (__lowerCamelCase :Tuple , __lowerCamelCase :Optional[Any] ): # Initialize accelerator _lowerCAmelCase = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase = config["""lr"""] _lowerCAmelCase = int(config["""num_epochs"""] ) _lowerCAmelCase = int(config["""seed"""] ) _lowerCAmelCase = int(config["""batch_size"""] ) _lowerCAmelCase = args.model_name_or_path set_seed(__lowerCamelCase ) _lowerCAmelCase , _lowerCAmelCase = get_dataloaders(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained(__lowerCamelCase , return_dict=__lowerCamelCase ) # Instantiate optimizer _lowerCAmelCase = ( AdamW if accelerator.state.deepspeed_plugin is None or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) _lowerCAmelCase = optimizer_cls(params=model.parameters() , lr=__lowerCamelCase ) if accelerator.state.deepspeed_plugin is not None: _lowerCAmelCase = accelerator.state.deepspeed_plugin.deepspeed_config[ """gradient_accumulation_steps""" ] else: _lowerCAmelCase = 1 _lowerCAmelCase = (len(__lowerCamelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): _lowerCAmelCase = get_linear_schedule_with_warmup( optimizer=__lowerCamelCase , num_warmup_steps=0 , num_training_steps=__lowerCamelCase , ) else: _lowerCAmelCase = DummyScheduler(__lowerCamelCase , total_num_steps=__lowerCamelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = accelerator.prepare( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) # We need to keep track of how many total steps we have iterated over _lowerCAmelCase = 0 # We also need to keep track of the stating epoch so files are named properly _lowerCAmelCase = 0 _lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" ) _lowerCAmelCase = num_epochs if args.partial_train_epoch is not None: _lowerCAmelCase = args.partial_train_epoch if args.resume_from_checkpoint: accelerator.load_state(args.resume_from_checkpoint ) _lowerCAmelCase = args.resume_from_checkpoint.split("""epoch_""" )[1] _lowerCAmelCase = """""" for char in epoch_string: if char.isdigit(): state_epoch_num += char else: break _lowerCAmelCase = int(__lowerCamelCase ) + 1 _lowerCAmelCase = evaluation_loop(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) accelerator.print("""resumed checkpoint performance:""" , __lowerCamelCase ) accelerator.print("""resumed checkpoint's scheduler's lr:""" , lr_scheduler.get_lr()[0] ) accelerator.print("""resumed optimizers's lr:""" , optimizer.param_groups[0]["""lr"""] ) with open(os.path.join(args.output_dir , f'state_{starting_epoch-1}.json' ) , """r""" ) as f: _lowerCAmelCase = json.load(__lowerCamelCase ) assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed" assert ( resumed_state["lr"] == lr_scheduler.get_lr()[0] ), "Scheduler learning rate mismatch, loading from checkpoint failed" assert ( resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"] ), "Optimizer learning rate mismatch, loading from checkpoint failed" assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed" return # Now we train the model _lowerCAmelCase = {} for epoch in range(__lowerCamelCase , __lowerCamelCase ): model.train() for step, batch in enumerate(__lowerCamelCase ): _lowerCAmelCase = model(**__lowerCamelCase ) _lowerCAmelCase = outputs.loss _lowerCAmelCase = loss / gradient_accumulation_steps accelerator.backward(__lowerCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 _lowerCAmelCase = f'epoch_{epoch}' _lowerCAmelCase = os.path.join(args.output_dir , __lowerCamelCase ) accelerator.save_state(__lowerCamelCase ) _lowerCAmelCase = evaluation_loop(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) _lowerCAmelCase = accuracy _lowerCAmelCase = lr_scheduler.get_lr()[0] _lowerCAmelCase = optimizer.param_groups[0]["""lr"""] _lowerCAmelCase = epoch _lowerCAmelCase = overall_step accelerator.print(f'epoch {epoch}:' , __lowerCamelCase ) accelerator.wait_for_everyone() if accelerator.is_main_process: with open(os.path.join(args.output_dir , f'state_{epoch}.json' ) , """w""" ) as f: json.dump(__lowerCamelCase , __lowerCamelCase ) def A (): _lowerCAmelCase = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""" ) parser.add_argument( """--model_name_or_path""" , type=__lowerCamelCase , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=__lowerCamelCase , ) parser.add_argument( """--output_dir""" , type=__lowerCamelCase , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.""" , ) parser.add_argument( """--resume_from_checkpoint""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""If the training should continue from a checkpoint folder.""" , ) parser.add_argument( """--partial_train_epoch""" , type=__lowerCamelCase , default=__lowerCamelCase , help="""If passed, the training will stop after this number of epochs.""" , ) parser.add_argument( """--num_epochs""" , type=__lowerCamelCase , default=2 , help="""Number of train epochs.""" , ) _lowerCAmelCase = parser.parse_args() _lowerCAmelCase = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16} training_function(__lowerCamelCase , __lowerCamelCase ) if __name__ == "__main__": main()
5
'''simple docstring'''
import functools


def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
665
0
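The row above implements the minimum-cost travel pass problem; with the variable names restored it can be sanity-checked in a few lines. The inputs are the standard LeetCode 983 example, not data from the original row:

days = [1, 4, 6, 7, 8, 20]
costs = [2, 7, 15]  # 1-day, 7-day, 30-day pass prices
print(minimum_tickets_cost(days, costs))  # 11: one 7-day pass covering days 1-7, plus 1-day passes for days 8 and 20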
from __future__ import annotations

from collections import namedtuple
from dataclasses import dataclass


@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("""The nodes number should be same as the number of coins""")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)
        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
6
'''simple docstring'''
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f'Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}'
        raise ValueError(msg)
    if cols2 != 1:
        msg = f'Constant matrix must be nx1 but received {rows2}x{cols2}'
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            """Coefficient and constant matrices dimensions must be nxn and nx1 but """
            f'received {rows1}x{cols1} and {rows2}x{cols2}'
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            """Number of initial values must be equal to number of rows in coefficient """
            f'matrix but received {len(init_val)} and {rows1}'
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError("""Iterations must be at least 1""")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("""Coefficient matrix is not strictly diagonally dominant""")
    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
665
0
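A quick call against the repaired Jacobi routine above. The system and starting vector are my own small strictly diagonally dominant example; the expected third-iteration values were worked out by hand:

import numpy as np

coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
constant = np.array([[2.0], [-6.0], [-4.0]])
init_val = [0.5, -0.5, -0.5]
print(jacobi_iteration_method(coefficient, constant, init_val, iterations=3))
# [0.909375, -1.14375, -0.7484375]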
"""simple docstring""" def _snake_case ( _snake_case : str ) -> str: '''simple docstring''' if not all(char in '01' for char in bin_string ): raise ValueError('Non-binary value was passed to the function' ) if not bin_string: raise ValueError('Empty string was passed to the function' ) _A = '' while len(_snake_case ) % 3 != 0: _A = '0' + bin_string _A = [ bin_string[index : index + 3] for index in range(len(_snake_case ) ) if index % 3 == 0 ] for bin_group in bin_string_in_3_list: _A = 0 for index, val in enumerate(_snake_case ): oct_val += int(2 ** (2 - index) * int(_snake_case ) ) oct_string += str(_snake_case ) return oct_string if __name__ == "__main__": from doctest import testmod testmod()
7
'''simple docstring'''


def abbr(a: str, b: str) -> bool:
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
665
0
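Both snippets repaired in this row, the binary-to-octal converter and the string-abbreviation DP, are easy to spot-check; the expected values below were traced by hand:

print(bin_to_octal('1111'))             # '17'
print(bin_to_octal('101010101010011'))  # '52523'
print(abbr('daBcd', 'ABC'))  # True: delete 'd', capitalize 'a', keep 'B', capitalize 'c', delete 'd'
print(abbr('dBcd', 'ABC'))   # False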
'''simple docstring''' from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase__ : Union[str, Any] = logging.get_logger(__name__) lowercase__ : Optional[int] = { '''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''', '''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''', '''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''', '''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''', '''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''', '''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''', '''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''', '''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''', '''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''', '''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''', } class SCREAMING_SNAKE_CASE (a__ ): lowerCAmelCase = '''xlm''' lowerCAmelCase = { '''hidden_size''': '''emb_dim''', '''num_attention_heads''': '''n_heads''', '''num_hidden_layers''': '''n_layers''', '''n_words''': '''vocab_size''', # For backward compatibility } def __init__( self , _UpperCAmelCase=3_0145 , _UpperCAmelCase=2048 , _UpperCAmelCase=12 , _UpperCAmelCase=16 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=1 , _UpperCAmelCase=True , _UpperCAmelCase=512 , _UpperCAmelCase=2048**-0.5 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=0 , _UpperCAmelCase=1 , _UpperCAmelCase=2 , _UpperCAmelCase=3 , _UpperCAmelCase=5 , _UpperCAmelCase=True , _UpperCAmelCase="first" , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=0.1 , _UpperCAmelCase=5 , _UpperCAmelCase=5 , _UpperCAmelCase=0 , _UpperCAmelCase=0 , _UpperCAmelCase=2 , _UpperCAmelCase=0 , **_UpperCAmelCase , ): '''simple docstring''' __A : int = vocab_size __A : Optional[int] = emb_dim __A : Any = n_layers __A : Optional[Any] = n_heads __A : Optional[Any] = dropout __A : Optional[int] = attention_dropout __A : List[str] = gelu_activation __A : Any = sinusoidal_embeddings __A : List[Any] = causal __A : Any = asm __A : int = n_langs __A : List[Any] = use_lang_emb __A : Tuple = layer_norm_eps __A : Any = bos_index __A : Any = eos_index __A : Optional[Any] = pad_index __A : int = unk_index __A : List[Any] = mask_index __A : List[str] = is_encoder __A : Dict = max_position_embeddings __A : Any = embed_init_std __A : Tuple = init_std __A : Any = summary_type __A : Dict = summary_use_proj __A : Dict = summary_activation __A : Dict = summary_proj_to_labels __A : List[Any] = summary_first_dropout __A : str = start_n_top __A : Any = end_n_top __A : Tuple = mask_token_id __A : Tuple = lang_id if "n_words" in kwargs: __A : Dict = kwargs['n_words'] super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , **_UpperCAmelCase) class SCREAMING_SNAKE_CASE (a__ ): @property def SCREAMING_SNAKE_CASE ( self): '''simple docstring''' if self.task == "multiple-choice": __A : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'} 
else: __A : List[str] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis), ])
8
'''simple docstring'''
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    '''simple docstring'''

    destination_vertex: int
    weight: int


class AdjacencyList:
    '''simple docstring'''

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("""Edge weight must be either 0 or 1.""")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("""Vertex indexes must be in [0; size).""")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0
        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue
            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)
        if distances[finish_vertex] is None:
            raise ValueError("""No path from start_vertex to finish_vertex.""")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
665
0
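A tiny usage example for the 0-1 BFS class repaired above; the graph is my own construction:

g = AdjacencyList(5)
g.add_edge(0, 1, 0)
g.add_edge(0, 3, 1)
g.add_edge(1, 2, 0)
g.add_edge(2, 4, 0)
g.add_edge(3, 4, 1)
print(g.get_shortest_path(0, 4))  # 0, via the all-zero-weight path 0 -> 1 -> 2 -> 4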
from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) @dataclass class __lowerCAmelCase ( UpperCAmelCase_ ): """simple docstring""" A__ : List[Any] = [ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self : Union[str, Any] , **_snake_case : List[str] ): """simple docstring""" for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: A__ = deprecated_arg[3:] A__ = not kwargs.pop(_snake_case ) logger.warning( F'''{deprecated_arg} is depreciated. Please use --no-{positive_arg} or''' F''' {positive_arg}={kwargs[positive_arg]}''' ) A__ = kwargs.pop('tpu_name' , self.tpu_name ) A__ = kwargs.pop('device_idx' , self.device_idx ) A__ = kwargs.pop('eager_mode' , self.eager_mode ) A__ = kwargs.pop('use_xla' , self.use_xla ) super().__init__(**_snake_case ) A__ : str = field( default=UpperCAmelCase_ , metadata={"help": "Name of TPU"} , ) A__ : int = field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) A__ : bool = field(default=UpperCAmelCase_ , metadata={"help": "Benchmark models in eager model."} ) A__ : bool = field( default=UpperCAmelCase_ , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." } , ) @cached_property def _a ( self : Dict ): """simple docstring""" requires_backends(self , ['tf'] ) A__ = None if self.tpu: try: if self.tpu_name: A__ = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: A__ = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: A__ = None return tpu @cached_property def _a ( self : str ): """simple docstring""" requires_backends(self , ['tf'] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) A__ = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' ) A__ = tf.distribute.OneDeviceStrategy(device=F'''/gpu:{self.device_idx}''' ) else: tf.config.set_visible_devices([] , 'GPU' ) # disable GPU A__ = tf.distribute.OneDeviceStrategy(device=F'''/cpu:{self.device_idx}''' ) return strategy @property def _a ( self : Optional[Any] ): """simple docstring""" requires_backends(self , ['tf'] ) return self._setup_tpu is not None @property def _a ( self : Tuple ): """simple docstring""" requires_backends(self , ['tf'] ) return self._setup_strategy @property def _a ( self : Any ): """simple docstring""" requires_backends(self , ['tf'] ) return tf.config.list_physical_devices('GPU' ) @property def _a ( self : Dict ): """simple docstring""" requires_backends(self , ['tf'] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _a ( self : List[Any] ): """simple docstring""" return self.n_gpu > 0
9
'''simple docstring'''


def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
665
0
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator
        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator
    return (denominator + 1) // 2


if __name__ == "__main__":
    print(f'{solution() = }')
10
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("""Accelerate CLI tool""", usage="""accelerate <command> [<args>]""", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="""accelerate command helpers""")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, """func"""):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
665
0
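The CLI above follows a common argparse pattern: each command module attaches a subparser and sets a func default that main later dispatches on. A minimal self-contained sketch of that dispatch pattern; the command name and behavior are made up for illustration:

from argparse import ArgumentParser


def greet_command_parser(subparsers):
    parser = subparsers.add_parser("greet", help="Print a greeting.")
    parser.add_argument("name")
    parser.set_defaults(func=lambda args: print(f"Hello, {args.name}!"))


def main():
    parser = ArgumentParser("demo", usage="demo <command> [<args>]")
    subparsers = parser.add_subparsers(help="demo command helpers")
    greet_command_parser(subparsers)
    args = parser.parse_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    args.func(args)


if __name__ == "__main__":
    main()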
'''simple docstring''' import argparse import json import os import tensorstore as ts import torch from flax import serialization from flax.traverse_util import flatten_dict, unflatten_dict from tensorflow.io import gfile from transformers.modeling_utils import dtype_byte_size from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import ( rename_keys, ) from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME from transformers.utils.hub import convert_file_size_to_int def lowerCAmelCase (__A , __A): """simple docstring""" if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3: # expert layer _a = flax_key_tuple[:-1] + ('''weight''',) _a = torch.permute(__A , (0, 2, 1)) elif flax_key_tuple[-1] == "kernel" and ".".join(__A): # linear layer _a = flax_key_tuple[:-1] + ('''weight''',) _a = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: _a = flax_key_tuple[:-1] + ('''weight''',) return flax_key_tuple, flax_tensor def lowerCAmelCase (__A , __A , __A): """simple docstring""" if "metadata" in layer: _a = layer.split('''metadata''') _a = ''''''.join(split_layer[0])[:-1] _a = [tuple(('''metadata''' + split_layer[1]).split('''/'''))] elif "kvstore" in layer: _a = layer.split('''kvstore''') _a = ''''''.join(split_layer[0])[:-1] _a = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))] else: _a = layer.split('''/''') _a = '''/'''.join(split_layer[:-1]) _a = (split_layer[-1],) if "kvstore/path" in layer: _a = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}''' elif "kvstore/driver" in layer: _a = '''file''' else: _a = checkpoint_info[layer] return curr_real_layer_name, split_layer, content def lowerCAmelCase (__A , __A): """simple docstring""" _a = rename_keys(__A) _a = {} for k, v in current_block.items(): _a = v _a = new_current_block torch.save(__A , __A) def lowerCAmelCase (__A , __A , __A , __A , __A = WEIGHTS_NAME): """simple docstring""" _a = convert_file_size_to_int(__A) _a = [] _a = {} _a = 0 _a = 0 os.makedirs(__A , exist_ok=__A) with gfile.GFile(switch_checkpoint_path + '''/checkpoint''' , '''rb''') as fp: _a = serialization.msgpack_restore(fp.read())['''optimizer''']['''target'''] _a = flatten_dict(__A , sep='''/''') _a = {} for layer in checkpoint_info.keys(): _a , _a , _a = get_key_and_tensorstore_dict( __A , __A , __A) if curr_real_layer_name in all_layers: _a = content else: _a = {split_layer[-1]: content} for key in all_layers.keys(): # open tensorstore file _a = ts.open(unflatten_dict(all_layers[key])).result().read().result() _a = torch.tensor(__A) _a = raw_weights.numel() * dtype_byte_size(raw_weights.dtype) # use the renaming pattern from the small conversion scripts _a , _a = rename_base_flax_keys(tuple(key.split('''/''')) , __A) _a = '''/'''.join(__A) # If this weight is going to tip up over the maximal size, we split. 
if current_block_size + weight_size > max_shard_size: _a = os.path.join( __A , weights_name.replace('''.bin''' , F'''-{len(__A)+1:05d}-of-???.bin''')) rename_and_save_block(__A , __A) sharded_state_dicts.append(current_block.keys()) del current_block _a = {} _a = 0 _a = raw_weights.to(getattr(__A , __A)) current_block_size += weight_size total_size += weight_size # Add the last block _a = os.path.join(__A , weights_name.replace('''.bin''' , F'''-{len(__A)+1:05d}-of-???.bin''')) rename_and_save_block(__A , __A) sharded_state_dicts.append(current_block.keys()) # If we only have one shard, we return it if len(__A) == 1: return {weights_name: sharded_state_dicts[0]}, None # Otherwise, let's build the index _a = {} _a = {} for idx, shard in enumerate(__A): _a = weights_name.replace( '''.bin''' , F'''-{idx+1:05d}-of-{len(__A):05d}.bin''') # len(sharded_state_dicts):05d} _a = os.path.join(__A , weights_name.replace('''.bin''' , F'''-{idx+1:05d}-of-???.bin''')) os.rename(__A , os.path.join(__A , __A)) _a = shard for key in shard: _a = shard_file # Add the metadata _a = {'''total_size''': total_size} _a = {'''metadata''': metadata, '''weight_map''': weight_map} with open(os.path.join(__A , __A) , '''w''' , encoding='''utf-8''') as f: _a = json.dumps(__A , indent=2 , sort_keys=__A) + '''\n''' f.write(__A) return metadata, index if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600", type=str, required=False, help="Path to a directory containing a folder per layer. Follows the original Google format.", ) parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size") parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model") parser.add_argument( "--pytorch_dump_folder_path", default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted", type=str, required=False, help="Path to the output pytorch model.", ) lowercase_ = parser.parse_args() shard_on_the_fly( args.switch_tax_checkpoint_path, args.pytorch_dump_folder_path, args.max_shard_size, args.dtype, ) def lowerCAmelCase (): """simple docstring""" from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer _a = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''') config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''') _a = SwitchTransformersForConditionalGeneration.from_pretrained( '''/home/arthur_huggingface_co/transformers/switch_converted''' , device_map='''auto''') _a = TaTokenizer.from_pretrained('''t5-small''') _a = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''' _a = tokenizer(__A , return_tensors='''pt''').input_ids _a = model.generate(__A , decoder_start_token_id=0) print(tokenizer.decode(out[0]))
11
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __magic_name__ = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
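The conversion script two rows up streams weights out of a T5X checkpoint and starts a new shard whenever the accumulated tensor bytes would exceed max_shard_size. A stripped-down sketch of just that splitting logic in plain PyTorch (no tensorstore, no key renaming; all names here are mine, not the script's):

import torch


def shard_state_dict(state_dict: dict[str, torch.Tensor], max_shard_bytes: int) -> list[dict[str, torch.Tensor]]:
    # Greedily pack tensors into shards, opening a new shard once the byte budget would be exceeded
    shards: list[dict[str, torch.Tensor]] = []
    current: dict[str, torch.Tensor] = {}
    current_size = 0
    for name, tensor in state_dict.items():
        weight_size = tensor.numel() * tensor.element_size()
        if current and current_size + weight_size > max_shard_bytes:
            shards.append(current)
            current, current_size = {}, 0
        current[name] = tensor
        current_size += weight_size
    if current:
        shards.append(current)
    return shards


state_dict = {f"layer_{i}.weight": torch.zeros(1000, 1000) for i in range(5)}  # ~4 MB per float32 tensor
shards = shard_state_dict(state_dict, max_shard_bytes=10 * 1024**2)
print([list(shard) for shard in shards])  # [['layer_0...', 'layer_1...'], ['layer_2...', 'layer_3...'], ['layer_4...']]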
import argparse

JS_PATH = """docs/source/_static/js/custom.js"""


def update_custom_js(version):
    '''simple docstring'''
    with open(JS_PATH, encoding="""utf-8""", newline="""\n""") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("""const stableVersion ="""):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {"""):
        index += 1

    # We go until the end
    while not lines[index].startswith("""}"""):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(JS_PATH, """w""", encoding="""utf-8""", newline="""\n""") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--version""", help="""Release version.""")
    args = parser.parse_args()
    update_custom_js(args.version)
12
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['YolosFeatureExtractor'] __magic_name__ = ['YolosImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST', 'YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: best sum of a contiguous (optionally empty) subarray."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
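# A minimal sanity check for the Kadane implementation above; the expected
# values follow from the classic example array, whose best subarray is
# [4, -1, 2, 1] with sum 6:
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6
assert max_subarray_sum([-1, -2], allow_empty_subarrays=True) == 0  # empty subarray wins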
13
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) __magic_name__ = { 'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'], 'tokenization_deberta': ['DebertaTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['DebertaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'DebertaForMaskedLM', 'DebertaForQuestionAnswering', 'DebertaForSequenceClassification', 'DebertaForTokenClassification', 'DebertaModel', 'DebertaPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDebertaForMaskedLM', 'TFDebertaForQuestionAnswering', 'TFDebertaForSequenceClassification', 'TFDebertaForTokenClassification', 'TFDebertaModel', 'TFDebertaPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig from .tokenization_deberta import DebertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_deberta_fast import DebertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deberta import ( TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFDebertaForMaskedLM, TFDebertaForQuestionAnswering, TFDebertaForSequenceClassification, TFDebertaForTokenClassification, TFDebertaModel, TFDebertaPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
def present_value(discount_rate: float, cash_flows: list[float]) -> float:
    """Discount a series of cash flows back to today: sum of CF_i / (1 + r)**i."""
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value_sum = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value_sum, ndigits=2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
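# Worked example (the cash flows are assumed for illustration): an initial
# outlay of -1000 followed by three annual inflows of 500, discounted at 10%:
#   -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ≈ 243.43
print(present_value(0.10, [-1000, 500, 500, 500]))  # -> 243.43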
14
def topological_sort(graph: dict[int, list[int]]) -> None:
    """Kahn's algorithm: repeatedly output vertices whose in-degree drops to zero."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
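# For the adjacency list above this prints a valid ordering, [0, 1, 2, 3, 4, 5].
# A cyclic graph is detected instead of looping forever (example input assumed):
topological_sort({0: [1], 1: [2], 2: [0]})  # prints "Cycle exists"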
665
0
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation A : List[str] = logging.get_logger(__name__) A : List[str] = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_config_file': 'tokenizer_config.json', } A : str = { 'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'}, 'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'}, 'tokenizer_config_file': { 'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json' }, } A : List[Any] = {'facebook/blenderbot-3B': 1_2_8} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def UpperCamelCase ( ) -> Any: """simple docstring""" lowercase__ = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) lowercase__ = bs[:] lowercase__ = 0 for b in range(2**8 ): if b not in bs: bs.append(__magic_name__ ) cs.append(2**8 + n ) n += 1 lowercase__ = [chr(__magic_name__ ) for n in cs] return dict(zip(__magic_name__ , __magic_name__ ) ) def UpperCamelCase ( __magic_name__ : Any ) -> List[str]: """simple docstring""" lowercase__ = set() lowercase__ = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase__ = char return pairs class A ( UpperCAmelCase__ ): '''simple docstring''' A__ = VOCAB_FILES_NAMES A__ = PRETRAINED_VOCAB_FILES_MAP A__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A__ = ['''input_ids''', '''attention_mask'''] def __init__(self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] , _UpperCAmelCase : str="replace" , _UpperCAmelCase : int="<s>" , _UpperCAmelCase : List[Any]="</s>" , _UpperCAmelCase : List[str]="</s>" , _UpperCAmelCase : Optional[int]="<s>" , _UpperCAmelCase : Tuple="<unk>" , _UpperCAmelCase : str="<pad>" , _UpperCAmelCase : Optional[Any]="<mask>" , _UpperCAmelCase : List[Any]=False , **_UpperCAmelCase : List[Any] , ) -> List[Any]: """simple docstring""" lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else bos_token lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else eos_token lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else sep_token lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else cls_token lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else unk_token lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase__ = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token super().__init__( errors=_UpperCAmelCase , bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase , **_UpperCAmelCase , ) with open(_UpperCAmelCase , encoding="""utf-8""" ) as vocab_handle: lowercase__ = json.load(_UpperCAmelCase ) lowercase__ = {v: k for k, v in self.encoder.items()} lowercase__ = errors # how to handle errors in decoding lowercase__ = bytes_to_unicode() lowercase__ = {v: k for k, v in self.byte_encoder.items()} with open(_UpperCAmelCase , encoding="""utf-8""" ) as merges_handle: lowercase__ = merges_handle.read().split("""\n""" )[1:-1] lowercase__ = [tuple(merge.split() ) for merge in bpe_merges] lowercase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) ) lowercase__ = {} lowercase__ = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase__ = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def lowerCamelCase__ (self : Tuple ) -> Tuple: """simple docstring""" return len(self.encoder ) def lowerCamelCase__ (self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ (self : List[str] , _UpperCAmelCase : Union[str, Any] ) -> List[Any]: """simple docstring""" if token in self.cache: return self.cache[token] lowercase__ = tuple(_UpperCAmelCase ) lowercase__ = get_pairs(_UpperCAmelCase ) if not pairs: return token while True: lowercase__ = min(_UpperCAmelCase , key=lambda _UpperCAmelCase : self.bpe_ranks.get(_UpperCAmelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break lowercase__ , lowercase__ = bigram lowercase__ = [] lowercase__ = 0 while i < len(_UpperCAmelCase ): try: lowercase__ = word.index(_UpperCAmelCase , _UpperCAmelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase__ = j if word[i] == first and i < len(_UpperCAmelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase__ = tuple(_UpperCAmelCase ) lowercase__ = new_word if len(_UpperCAmelCase ) == 1: break else: lowercase__ = get_pairs(_UpperCAmelCase ) lowercase__ = """ """.join(_UpperCAmelCase ) lowercase__ = word return word def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[str] ) -> int: """simple docstring""" lowercase__ = [] for token in re.findall(self.pat , _UpperCAmelCase ): lowercase__ = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_UpperCAmelCase ).split(""" """ ) ) return bpe_tokens def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> int: """simple docstring""" return self.encoder.get(_UpperCAmelCase , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : List[Any] ) -> int: """simple 
docstring""" return self.decoder.get(_UpperCAmelCase ) def lowerCamelCase__ (self : str , _UpperCAmelCase : Any ) -> Optional[Any]: """simple docstring""" lowercase__ = """""".join(_UpperCAmelCase ) lowercase__ = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase__ (self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(_UpperCAmelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) lowercase__ = os.path.join( _UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=_UpperCAmelCase , ensure_ascii=_UpperCAmelCase ) + """\n""" ) lowercase__ = 0 with open(_UpperCAmelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _UpperCAmelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) lowercase__ = token_index writer.write(""" """.join(_UpperCAmelCase ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase__ (self : Tuple , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None , _UpperCAmelCase : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase ) if token_ids_a is None: return [1] + ([0] * len(_UpperCAmelCase )) + [1] return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1] def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]: """simple docstring""" lowercase__ = [self.sep_token_id] lowercase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ (self : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any=False , **_UpperCAmelCase : Tuple ) -> Any: """simple docstring""" lowercase__ = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_UpperCAmelCase ) > 0 and not text[0].isspace()): lowercase__ = """ """ + text return (text, kwargs) def lowerCamelCase__ (self : Union[str, Any] , _UpperCAmelCase : List[int] , _UpperCAmelCase : Optional[List[int]] = None ) -> Any: """simple docstring""" return token_ids_a + [self.eos_token_id] def lowerCamelCase__ (self : Optional[Any] , _UpperCAmelCase : "Conversation" ) -> List[int]: """simple docstring""" lowercase__ = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(""" """ + text ) else: # Generated responses should contain them already. 
inputs.append(_UpperCAmelCase ) lowercase__ = """ """.join(_UpperCAmelCase ) lowercase__ = self.encode(_UpperCAmelCase ) if len(_UpperCAmelCase ) > self.model_max_length: lowercase__ = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
15
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,): '''simple docstring''' A_ : List[str] = parent A_ : Any = batch_size A_ : Tuple = seq_length A_ : List[str] = is_training A_ : Tuple = use_input_mask A_ : Dict = use_token_type_ids A_ : List[Any] = use_labels A_ : Union[str, Any] = vocab_size A_ : Any = hidden_size A_ : str = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : str = intermediate_size A_ : Tuple = hidden_act A_ : Any = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : List[str] = max_position_embeddings A_ : int = type_vocab_size A_ : Union[str, Any] = type_sequence_label_size A_ : Any = initializer_range A_ : List[Any] = num_labels A_ : Optional[Any] = num_choices A_ : List[Any] = scope def _a ( self : Optional[int] ): '''simple docstring''' A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : int = None if self.use_input_mask: A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Dict = None if self.use_token_type_ids: A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : str = None A_ : Any = None A_ : str = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) A_ : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Optional[Any] ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,) def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ): '''simple docstring''' A_ : Any = LlamaModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[Any] = model(_a ,attention_mask=_a ) A_ : Optional[int] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def 
_a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,): '''simple docstring''' A_ : List[str] = True A_ : Union[str, Any] = LlamaModel(_a ) model.to(_a ) model.eval() A_ : Tuple = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,) A_ : List[Any] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,) A_ : int = model(_a ,attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,): '''simple docstring''' A_ : List[Any] = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() A_ : Dict = model(_a ,attention_mask=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,): '''simple docstring''' A_ : Optional[Any] = True A_ : Any = True A_ : Tuple = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass A_ : Optional[int] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,) A_ : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 ) A_ : List[str] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0] A_ : Any = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0] # select random slice A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() A_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Any = config_and_inputs A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () a_ = (LlamaForCausalLM,) if is_torch_available() else () a_ = ( { """feature-extraction""": LlamaModel, """text-classification""": LlamaForSequenceClassification, """text-generation""": LlamaForCausalLM, """zero-shot""": LlamaForSequenceClassification, } if is_torch_available() else {} ) a_ = False a_ = False def _a ( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = LlamaModelTester(self ) A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 ) 
def _a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : Dict = type self.model_tester.create_and_check_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[str] = 3 A_ : Any = input_dict["""input_ids"""] A_ : Union[str, Any] = input_ids.ne(1 ).to(_a ) A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A_ : List[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : int = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Dict ): '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : str = 3 A_ : Union[str, Any] = """single_label_classification""" A_ : Union[str, Any] = input_dict["""input_ids"""] A_ : List[Any] = input_ids.ne(1 ).to(_a ) A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A_ : List[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = 3 A_ : Dict = """multi_label_classification""" A_ : Any = input_dict["""input_ids"""] A_ : Optional[Any] = input_ids.ne(1 ).to(_a ) A_ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A_ : Optional[int] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def _a ( self : Any ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _a ( self : Optional[Any] ,_a : List[Any] ): '''simple docstring''' A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size ) A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A_ : int = LlamaModel(_a ) original_model.to(_a ) original_model.eval() A_ : Tuple = original_model(_a ).last_hidden_state A_ : Union[str, Any] = original_model(_a ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0} A_ : int = LlamaModel(_a ) scaled_model.to(_a ) scaled_model.eval() A_ : List[Any] = scaled_model(_a ).last_hidden_state A_ : Any = scaled_model(_a ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # 
maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" ) A_ : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : str ): '''simple docstring''' A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" ) A_ : Tuple = model(torch.tensor(_a ) ) # Expected mean on dim = -1 A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" ) A_ : int = model(torch.tensor(_a ) ) # Expected mean on dim = -1 A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test""" ) @slow def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" ) A_ : Tuple = model(torch.tensor(_a ) ) A_ : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Model is curently gated""" ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" A_ : List[str] = """Simply put, the theory of relativity states that """ A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" ) A_ : List[str] = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a ) # greedy generation outputs A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a ) A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a ) self.assertEqual(_a ,_a )
665
0
import re


def indian_phone_validator(phone: str) -> bool:
    """Accept a 10-digit number starting with 7/8/9, optionally prefixed by +91, 91 or 0."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
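# A few spot checks for the validator above (inputs assumed for illustration):
assert indian_phone_validator("+918827897895") is True
assert indian_phone_validator("9876543210") is True
assert indian_phone_validator("1234567890") is False  # leading digit must be 7, 8 or 9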
16
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets __magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' __magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' __magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def _a ( self : Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ), """references""": datasets.Value("""string""" ), } ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,) def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = 0.0 for i, j in zip(_a ,_a ): n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0 A_ : List[str] = n_correct / len(_a ) return { "accuracy": accuracy, }
665
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase_ : Any = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Dict = [ '''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''OPTForCausalLM''', '''OPTModel''', '''OPTPreTrainedModel''', '''OPTForSequenceClassification''', '''OPTForQuestionAnswering''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Optional[Any] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : str = [ '''FlaxOPTForCausalLM''', '''FlaxOPTModel''', '''FlaxOPTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys UpperCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
17
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
665
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available _SCREAMING_SNAKE_CASE = { "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"], "configuration_maskformer_swin": ["MaskFormerSwinConfig"], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = ["MaskFormerFeatureExtractor"] _SCREAMING_SNAKE_CASE = ["MaskFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE = [ "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "MaskFormerForInstanceSegmentation", "MaskFormerModel", "MaskFormerPreTrainedModel", ] _SCREAMING_SNAKE_CASE = [ "MaskFormerSwinBackbone", "MaskFormerSwinModel", "MaskFormerSwinPreTrainedModel", ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure)
18
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'spiece.model'} __magic_name__ = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } __magic_name__ = { 'google/bigbird-roberta-base': 4_096, 'google/bigbird-roberta-large': 4_096, 'google/bigbird-base-trivia-itc': 4_096, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = [] def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,): '''simple docstring''' A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,) A_ : Optional[int] = vocab_file A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) @property def _a ( self : Union[str, Any] ): '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = self.__dict__.copy() A_ : Union[str, Any] = None return state def __setstate__( self : List[Any] ,_a : Any ): '''simple docstring''' A_ : Tuple = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): A_ : Tuple = {} A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self : Union[str, Any] ,_a : str ): '''simple docstring''' return self.sp_model.encode(_a ,out_type=_a ) def _a ( self : Optional[int] ,_a : str ): '''simple docstring''' return self.sp_model.piece_to_id(_a ) def _a ( self : int ,_a : Optional[int] ): '''simple docstring''' A_ : List[str] = self.sp_model.IdToPiece(_a ) return token def _a ( self : Dict ,_a : int ): '''simple docstring''' A_ : int = [] A_ : Any = """""" A_ : str = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token A_ : Dict = True A_ : Union[str, Any] = [] else: current_sub_tokens.append(_a ) A_ : str = False out_string += self.sp_model.decode(_a ) return out_string.strip() def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,): '''simple docstring''' A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a ) A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A_ : str = [] A_ : int = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) A_ : List[str] = [] sub_texts.append(_a ) else: current_sub_text.append(_a ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) ) else: A_ : Tuple = """""".join(_a ) A_ : str = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A_ : Optional[Any] = self.clean_up_tokenization(_a ) return clean_text else: return text def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : int = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_a ) elif not os.path.isfile(self.vocab_file ): with open(_a ,"""wb""" ) as fi: A_ : str = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : List[Any] = [self.cls_token_id] A_ : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Tuple = [self.sep_token_id] A_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
665
0
"""simple docstring""" import numpy as np def lowerCamelCase__ ( __snake_case ) -> np.array: """simple docstring""" return 1 / (1 + np.exp(-vector )) if __name__ == "__main__": import doctest doctest.testmod()
19
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ): '''simple docstring''' A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a ) return generator, ["Something to write", "Something else"] def _a ( self : str ,_a : Union[str, Any] ,_a : int ): '''simple docstring''' A_ : Any = generator("""Something there""" ) self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) A_ : List[str] = generator( ["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) with self.assertRaises(_a ): generator(4 ) @require_torch def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" ) # do_sample=False necessary for reproducibility A_ : Tuple = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] ) A_ : Optional[int] = 3 A_ : Tuple = generator( """Something there""" ,num_return_sequences=_a ,num_beams=_a ,) A_ : Optional[Any] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a ,_a ) A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a ) self.assertEqual( _a ,[ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] ,) A_ : Dict = generator.model.config.eos_token_id A_ : Optional[int] = """<pad>""" A_ : List[Any] = generator( ["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,) self.assertEqual( _a ,[ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] ,) @require_tf def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" ) # do_sample=False necessary for reproducibility A_ : Dict = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] )
665
0
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self) -> str:
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value) -> bool:
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position: int, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self) -> bool:
        return self.head is None


def create_linked_list() -> None:  # empty placeholder; the original identifier was mangled
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
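# Brief usage sketch for the reconstructed doubly linked list above:
dll = LinkedList()
for value in (1, 2, 3):
    dll.insert(value)
print(dll)           # -> 1 2 3
dll.delete_value(2)
print(2 in dll)      # -> False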
20
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}


class GPTBigCodeConfig(PretrainedConfig):
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
665
0
import math


def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation log10(x**y) = y * log10(x), where 10 is the base.
        return y * math.log10(x)
    if x == 0:  # 0 raised to any number is 0
        return 0
    if y == 0:  # any number raised to 0 is 1
        return 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":
    # Main function
    # Read two numbers from input and typecast them to int using the map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)

    # We check for the largest number
    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
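# Why compare logarithms: y * log10(x) orders x**y without materialising the
# huge powers themselves (example values assumed for illustration):
assert res(2, 100) > res(3, 60)  # 2**100 ≈ 1.27e30 exceeds 3**60 ≈ 4.24e28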
21
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} __magic_name__ = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } __magic_name__ = { 'allenai/longformer-base-4096': 4_096, 'allenai/longformer-large-4096': 4_096, 'allenai/longformer-large-4096-finetuned-triviaqa': 4_096, 'allenai/longformer-base-4096-extra.pos.embd.only': 4_096, 'allenai/longformer-large-4096-extra.pos.embd.only': 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCamelCase ( ): A_ : Union[str, Any] = ( list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1)) ) A_ : Optional[Any] = bs[:] A_ : List[str] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase) cs.append(2**8 + n) n += 1 A_ : List[Any] = [chr(lowerCamelCase) for n in cs] return dict(zip(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int): A_ : int = set() A_ : int = word[0] for char in word[1:]: pairs.add((prev_char, char)) A_ : List[str] = char return pairs class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,): '''simple docstring''' A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if 
isinstance(_a ,_a ) else sep_token A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token super().__init__( errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,) with open(_a ,encoding="""utf-8""" ) as vocab_handle: A_ : str = json.load(_a ) A_ : Optional[int] = {v: k for k, v in self.encoder.items()} A_ : List[str] = errors # how to handle errors in decoding A_ : List[str] = bytes_to_unicode() A_ : str = {v: k for k, v in self.byte_encoder.items()} with open(_a ,encoding="""utf-8""" ) as merges_handle: A_ : Any = merges_handle.read().split("""\n""" )[1:-1] A_ : str = [tuple(merge.split() ) for merge in bpe_merges] A_ : int = dict(zip(_a ,range(len(_a ) ) ) ) A_ : List[Any] = {} A_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def _a ( self : Any ): '''simple docstring''' return len(self.encoder ) def _a ( self : str ): '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def _a ( self : int ,_a : int ): '''simple docstring''' if token in self.cache: return self.cache[token] A_ : Optional[int] = tuple(_a ) A_ : Any = get_pairs(_a ) if not pairs: return token while True: A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A_ , A_ : Dict = bigram A_ : int = [] A_ : Optional[Any] = 0 while i < len(_a ): try: A_ : List[str] = word.index(_a ,_a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A_ : Tuple = j if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A_ : str = tuple(_a ) A_ : str = new_word if len(_a ) == 1: break else: A_ : int = get_pairs(_a ) A_ : Optional[int] = """ """.join(_a ) A_ : List[str] = word return word def _a ( self : Dict ,_a : Optional[int] ): '''simple docstring''' A_ : Any = [] for token in re.findall(self.pat ,_a ): A_ : Any = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) ) return bpe_tokens def _a ( self : Union[str, Any] ,_a : Optional[int] ): '''simple docstring''' return self.encoder.get(_a ,self.encoder.get(self.unk_token ) ) def _a ( self : int ,_a : Dict ): '''simple docstring''' return self.decoder.get(_a ) def _a ( self : Optional[int] ,_a : List[Any] ): '''simple docstring''' A_ : Optional[int] = """""".join(_a ) A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : int = os.path.join( _a ,(filename_prefix + 
"""-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : int = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(_a ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" ) A_ : int = 0 with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) A_ : Dict = token_index writer.write(""" """.join(_a ) + """\n""" ) index += 1 return vocab_file, merge_file def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : int = [self.cls_token_id] A_ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1] def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Union[str, Any] = [self.sep_token_id] A_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ): '''simple docstring''' A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()): A_ : Optional[int] = """ """ + text return (text, kwargs)
style_context_codestyle: 665
label: 0
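
The byte-level BPE tokenizer in the row above starts from a byte-to-unicode table so that every possible input byte maps to a printable character before any merges run. A minimal sketch of that table follows; the variable names are assumptions (the sample's locals are obfuscated), but the construction matches the usual GPT-2-style recipe:

# Printable Latin-1 bytes keep their own character; every remaining byte
# (controls, whitespace, 0xAD) is shifted past 255 to a fresh visible codepoint.
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    return dict(zip(bs, [chr(c) for c in cs]))

table = bytes_to_unicode()
print(table[ord(" ")])  # 'Ġ' (U+0120), which is why byte-level BPE vocabularies are full of Ġ-prefixed tokens
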
'''simple docstring''' import json import os from typing import Optional import numpy as np from ...feature_extraction_utils import BatchFeature from ...processing_utils import ProcessorMixin from ...utils import logging from ...utils.hub import get_file_from_repo from ..auto import AutoTokenizer _snake_case : Any = logging.get_logger(__name__) class A ( _a ): lowercase_ = 'AutoTokenizer' lowercase_ = ['tokenizer'] lowercase_ = { 'semantic_prompt': 1, 'coarse_prompt': 2, 'fine_prompt': 2, } def __init__( self : Tuple , lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any]=None ) -> Dict: """simple docstring""" super().__init__(lowerCAmelCase_ ) _a = speaker_embeddings @classmethod def __lowerCAmelCase ( cls : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]="speaker_embeddings_path.json" , **lowerCAmelCase_ : Any ) -> str: """simple docstring""" if speaker_embeddings_dict_path is not None: _a = get_file_from_repo( lowerCAmelCase_ , lowerCAmelCase_ , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase_ ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase_ ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase_ ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase_ ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase_ ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase_ ) , revision=kwargs.pop('''revision''' , lowerCAmelCase_ ) , ) if speaker_embeddings_path is None: logger.warning( F'`{os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )}` does not exists\n , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json\n dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' ) _a = None else: with open(lowerCAmelCase_ ) as speaker_embeddings_json: _a = json.load(lowerCAmelCase_ ) else: _a = None _a = AutoTokenizer.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ ) return cls(tokenizer=lowerCAmelCase_ , speaker_embeddings=lowerCAmelCase_ ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Optional[Any]="speaker_embeddings_path.json" , lowerCAmelCase_ : Dict="speaker_embeddings" , lowerCAmelCase_ : bool = False , **lowerCAmelCase_ : List[str] , ) -> str: """simple docstring""" if self.speaker_embeddings is not None: os.makedirs(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ , '''v2''' ) , exist_ok=lowerCAmelCase_ ) _a = {} _a = save_directory for prompt_key in self.speaker_embeddings: if prompt_key != "repo_or_path": _a = self._load_voice_preset(lowerCAmelCase_ ) _a = {} for key in self.speaker_embeddings[prompt_key]: np.save( os.path.join( embeddings_dict['''repo_or_path'''] , lowerCAmelCase_ , F'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=lowerCAmelCase_ , ) _a = os.path.join(lowerCAmelCase_ , F'{prompt_key}_{key}.npy' ) _a = tmp_dict with open(os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , '''w''' ) as fp: json.dump(lowerCAmelCase_ , lowerCAmelCase_ ) super().save_pretrained(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ) def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : str = None , **lowerCAmelCase_ : Union[str, Any] ) -> Any: """simple docstring""" _a = self.speaker_embeddings[voice_preset] _a = {} for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset_paths: raise ValueError( F'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' 
) _a = get_file_from_repo( self.speaker_embeddings.get('''repo_or_path''' , '''/''' ) , voice_preset_paths[key] , subfolder=kwargs.pop('''subfolder''' , lowerCAmelCase_ ) , cache_dir=kwargs.pop('''cache_dir''' , lowerCAmelCase_ ) , force_download=kwargs.pop('''force_download''' , lowerCAmelCase_ ) , proxies=kwargs.pop('''proxies''' , lowerCAmelCase_ ) , resume_download=kwargs.pop('''resume_download''' , lowerCAmelCase_ ) , local_files_only=kwargs.pop('''local_files_only''' , lowerCAmelCase_ ) , use_auth_token=kwargs.pop('''use_auth_token''' , lowerCAmelCase_ ) , revision=kwargs.pop('''revision''' , lowerCAmelCase_ ) , ) if path is None: raise ValueError( F'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exists\n , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}\n embeddings.' ) _a = np.load(lowerCAmelCase_ ) return voice_preset_dict def __lowerCAmelCase ( self : int , lowerCAmelCase_ : Optional[dict] = None ) -> Any: """simple docstring""" for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: if key not in voice_preset: raise ValueError(F'Voice preset unrecognized, missing {key} as a key.' ) if not isinstance(voice_preset[key] , np.ndarray ): raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) if len(voice_preset[key].shape ) != self.preset_shape[key]: raise ValueError(F'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' ) def __call__( self : Union[str, Any] , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : Dict="pt" , lowerCAmelCase_ : Dict=2_56 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Union[str, Any]=True , lowerCAmelCase_ : Union[str, Any]=False , **lowerCAmelCase_ : int , ) -> str: """simple docstring""" if voice_preset is not None and not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): if ( isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and self.speaker_embeddings is not None and voice_preset in self.speaker_embeddings ): _a = self._load_voice_preset(lowerCAmelCase_ ) else: if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and not voice_preset.endswith('''.npz''' ): _a = voice_preset + '''.npz''' _a = np.load(lowerCAmelCase_ ) if voice_preset is not None: self._validate_voice_preset_dict(lowerCAmelCase_ , **lowerCAmelCase_ ) _a = BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ ) _a = self.tokenizer( lowerCAmelCase_ , return_tensors=lowerCAmelCase_ , padding='''max_length''' , max_length=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , **lowerCAmelCase_ , ) if voice_preset is not None: _a = voice_preset return encoded_text
code_codestyle: 22
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.txt'} __magic_name__ = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __magic_name__ = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __magic_name__ = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_INIT_CONFIGURATION a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ConvBertTokenizer def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,): '''simple docstring''' super().__init__( _a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,) A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_a ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars ): A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) ) A_ : str = do_lower_case A_ : Any = strip_accents A_ : int = tokenize_chinese_chars A_ : Tuple = normalizer_class(**_a ) A_ : Any = do_lower_case def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ): '''simple docstring''' A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : int = [self.sep_token_id] A_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a ) return tuple(_a )
style_context_codestyle: 665
label: 0
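
The speaker-embedding processor in the row above stores each voice-preset prompt ("semantic_prompt", "coarse_prompt", "fine_prompt") as its own .npy file, keeps a JSON index of where those files live, and validates array rank on load. A minimal round-trip sketch under those assumptions; file names and shapes here are illustrative, not taken from the sample:

import json
import os
import tempfile

import numpy as np

preset_ndim = {"semantic_prompt": 1, "coarse_prompt": 2, "fine_prompt": 2}  # ranks the sample enforces
voice_preset = {
    "semantic_prompt": np.zeros(16),
    "coarse_prompt": np.zeros((2, 16)),
    "fine_prompt": np.zeros((2, 16)),
}
for key, arr in voice_preset.items():
    assert arr.ndim == preset_ndim[key]  # mirrors the processor's shape validation

with tempfile.TemporaryDirectory() as save_dir:
    index = {}
    for key, arr in voice_preset.items():
        np.save(os.path.join(save_dir, f"{key}.npy"), arr, allow_pickle=False)  # one file per prompt
        index[key] = f"{key}.npy"
    with open(os.path.join(save_dir, "speaker_embeddings_path.json"), "w") as fp:
        json.dump(index, fp)  # the JSON index maps prompt names to .npy files
    reloaded = {k: np.load(os.path.join(save_dir, p)) for k, p in index.items()}
    assert reloaded["coarse_prompt"].shape == (2, 16)
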
import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder snake_case__ : Union[str, Any] = """__DUMMY_TRANSFORMERS_USER__""" snake_case__ : Optional[int] = """Dummy User""" snake_case__ : Any = """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""" snake_case__ : List[Any] = """https://hub-ci.huggingface.co""" snake_case__ : Dict = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" snake_case__ : Dict = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" snake_case__ : Optional[int] = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def _snake_case (__lowercase): monkeypatch.setattr( 'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , __lowercase) @pytest.fixture def _snake_case (__lowercase): monkeypatch.setattr('datasets.config.HF_ENDPOINT' , __lowercase) monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , __lowercase) @pytest.fixture def _snake_case (__lowercase): monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , __lowercase) @pytest.fixture def _snake_case (__lowercase , __lowercase): HfFolder.save_token(__lowercase) yield HfFolder.delete_token() @pytest.fixture(scope='session') def _snake_case (): return HfApi(endpoint=__lowercase) @pytest.fixture(scope='session') def _snake_case (__lowercase): UpperCamelCase_ = HfFolder.get_token() HfFolder.save_token(__lowercase) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(__lowercase) @pytest.fixture def _snake_case (__lowercase): def _cleanup_repo(__lowercase): hf_api.delete_repo(__lowercase , token=__lowercase , repo_type='dataset') return _cleanup_repo @pytest.fixture def _snake_case (__lowercase): @contextmanager def _temporary_repo(__lowercase): try: yield repo_id finally: cleanup_repo(__lowercase) return _temporary_repo @pytest.fixture(scope='session') def _snake_case (__lowercase , __lowercase , __lowercase): UpperCamelCase_ = f"""repo_txt_data-{int(time.time() * 10e3)}""" UpperCamelCase_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__lowercase , token=__lowercase , repo_type='dataset' , private=__lowercase) hf_api.upload_file( token=__lowercase , path_or_fileobj=str(__lowercase) , path_in_repo='data/text_data.txt' , repo_id=__lowercase , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(__lowercase , token=__lowercase , repo_type='dataset') except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _snake_case (__lowercase , __lowercase , __lowercase): return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope='session') def _snake_case (__lowercase , __lowercase , __lowercase): UpperCamelCase_ = f"""repo_zipped_txt_data-{int(time.time() * 10e3)}""" UpperCamelCase_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__lowercase , token=__lowercase , repo_type='dataset' , private=__lowercase) hf_api.upload_file( token=__lowercase , path_or_fileobj=str(__lowercase) , path_in_repo='data.zip' , repo_id=__lowercase , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(__lowercase , token=__lowercase , repo_type='dataset') except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _snake_case (__lowercase , __lowercase , __lowercase): return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope='session') def _snake_case (__lowercase , __lowercase , __lowercase): UpperCamelCase_ 
= f"""repo_zipped_img_data-{int(time.time() * 10e3)}""" UpperCamelCase_ = f"""{CI_HUB_USER}/{repo_name}""" hf_api.create_repo(__lowercase , token=__lowercase , repo_type='dataset' , private=__lowercase) hf_api.upload_file( token=__lowercase , path_or_fileobj=str(__lowercase) , path_in_repo='data.zip' , repo_id=__lowercase , repo_type='dataset' , ) yield repo_id try: hf_api.delete_repo(__lowercase , token=__lowercase , repo_type='dataset') except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def _snake_case (__lowercase , __lowercase , __lowercase): return hf_private_dataset_repo_zipped_img_data_
code_codestyle: 23
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart __magic_name__ = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } __magic_name__ = { 'facebook/bart-base': 1_024, 'facebook/bart-large': 1_024, 'facebook/bart-large-mnli': 1_024, 'facebook/bart-large-cnn': 1_024, 'facebook/bart-large-xsum': 1_024, 'yjernite/bart_eli5': 1_024, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = BartTokenizer def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,): '''simple docstring''' super().__init__( _a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,) A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space: A_ : 
List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**_a ) A_ : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A_ : str = """post_processor""" A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a ) if tokenizer_component_instance: A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : Tuple = tuple(state["""sep"""] ) if "cls" in state: A_ : Tuple = tuple(state["""cls"""] ) A_ : List[str] = False if state.get("""add_prefix_space""" ,_a ) != add_prefix_space: A_ : Dict = add_prefix_space A_ : Any = True if state.get("""trim_offsets""" ,_a ) != trim_offsets: A_ : Union[str, Any] = trim_offsets A_ : List[Any] = True if changes_to_apply: A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) ) A_ : Tuple = component_class(**_a ) setattr(self.backend_tokenizer ,_a ,_a ) @property def _a ( self : List[str] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _a ( self : Union[str, Any] ,_a : Any ): '''simple docstring''' A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value A_ : List[Any] = value def _a ( self : str ,*_a : str ,**_a : Optional[int] ): '''simple docstring''' A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*_a ,**_a ) def _a ( self : str ,*_a : List[Any] ,**_a : str ): '''simple docstring''' A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*_a ,**_a ) def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(_a ,name=_a ) return tuple(_a ) def _a ( self : str ,_a : Optional[int] ,_a : int=None ): '''simple docstring''' A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Dict = [self.sep_token_id] A_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
style_context_codestyle: 665
label: 0
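
The Hub CI fixtures in the row above rely on a fixture-factory pattern: the fixture returns a context manager, so each test can open a temporary repo and the cleanup runs even when the test body raises. A stripped-down sketch of the same pattern with hypothetical names (the real fixtures call hf_api.delete_repo in the cleanup path):

import pytest
from contextlib import contextmanager

@pytest.fixture
def temporary_repo():
    deleted = []

    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            deleted.append(repo_id)  # stand-in for hf_api.delete_repo(repo_id, ...)

    return _temporary_repo

def test_repo_is_cleaned_up(temporary_repo):
    with temporary_repo("user/demo") as repo_id:
        assert repo_id == "user/demo"
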
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging UpperCAmelCase_ : int = logging.get_logger(__name__) UpperCAmelCase_ : Optional[Any] = { '''google/umt5-small''': '''https://huggingface.co/google/umt5-small/resolve/main/config.json''', # See all umt5 models at https://huggingface.co/models?filter=umt5 } class lowerCAmelCase ( __lowerCAmelCase): __lowercase : List[Any] = '''umt5''' __lowercase : Union[str, Any] = ['''past_key_values'''] def __init__( self , __SCREAMING_SNAKE_CASE=25_0112 , __SCREAMING_SNAKE_CASE=512 , __SCREAMING_SNAKE_CASE=64 , __SCREAMING_SNAKE_CASE=1024 , __SCREAMING_SNAKE_CASE=8 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=6 , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=0.1 , __SCREAMING_SNAKE_CASE=1E-6 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE="gated-gelu" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="T5Tokenizer" , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=0 , __SCREAMING_SNAKE_CASE=1 , __SCREAMING_SNAKE_CASE=0 , **__SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: '''simple docstring''' super().__init__( is_encoder_decoder=__SCREAMING_SNAKE_CASE , tokenizer_class=__SCREAMING_SNAKE_CASE , tie_word_embeddings=__SCREAMING_SNAKE_CASE , pad_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , decoder_start_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , ) __snake_case = vocab_size __snake_case = d_model __snake_case = d_kv __snake_case = d_ff __snake_case = num_layers __snake_case = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __snake_case = num_heads __snake_case = relative_attention_num_buckets __snake_case = relative_attention_max_distance __snake_case = dropout_rate __snake_case = layer_norm_epsilon __snake_case = initializer_factor __snake_case = feed_forward_proj __snake_case = use_cache __snake_case = self.feed_forward_proj.split('''-''' ) __snake_case = act_info[-1] __snake_case = act_info[0] == '''gated''' if len(__SCREAMING_SNAKE_CASE ) > 1 and act_info[0] != "gated" or len(__SCREAMING_SNAKE_CASE ) > 2: raise ValueError( F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
''' '''\'gated-gelu\' or \'relu\'''' ) if feed_forward_proj == "gated-gelu": __snake_case = '''gelu_new''' @property def lowerCAmelCase ( self ) -> int: '''simple docstring''' return self.d_model @property def lowerCAmelCase ( self ) -> Any: '''simple docstring''' return self.num_heads @property def lowerCAmelCase ( self ) -> List[Any]: '''simple docstring''' return self.num_layers class lowerCAmelCase ( __lowerCAmelCase): @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' __snake_case = { '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''}, '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''}, } if self.use_past: __snake_case = '''past_encoder_sequence + sequence''' __snake_case = {0: '''batch'''} __snake_case = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: __snake_case = {0: '''batch''', 1: '''decoder_sequence'''} __snake_case = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__SCREAMING_SNAKE_CASE , direction='''inputs''' ) return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def lowerCAmelCase ( self ) -> int: '''simple docstring''' return 13 @property def lowerCAmelCase ( self ) -> float: '''simple docstring''' return 5E-4
code_codestyle: 24
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file A_ : int = TapasConfig.from_json_file(lowerCamelCase) # set absolute/relative position embeddings parameter A_ : List[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "WTQ": # run_task_main.py hparams A_ : Tuple = 4 A_ : Optional[Any] = True # hparam_utils.py hparams A_ : Any = 0.66_4694 A_ : str = 0.20_7951 A_ : Any = 0.12_1194 A_ : str = True A_ : Dict = True A_ : int = False A_ : int = 0.035_2513 A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams A_ : int = 4 A_ : Union[str, Any] = False # hparam_utils.py hparams A_ : Dict = 36.4519 A_ : List[Any] = 0.90_3421 A_ : Any = 222.088 A_ : Optional[Any] = True A_ : Optional[int] = True A_ : Optional[Any] = True A_ : Optional[int] = 0.76_3141 A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "TABFACT": A_ : Any = TapasForSequenceClassification(config=lowerCamelCase) elif task == "MLM": A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase) elif task == "INTERMEDIATE_PRETRAINING": A_ : Union[str, Any] = TapasModel(config=lowerCamelCase) else: raise ValueError(F'Task {task} not supported.') print(F'Building PyTorch model from configuration: {config}') # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Save pytorch-model (weights and configuration) print(F'Save PyTorch model to {pytorch_dump_path}') model.save_pretrained(lowerCamelCase) # Save tokenizer files print(F'Save tokenizer files to {pytorch_dump_path}') A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512) tokenizer.save_pretrained(lowerCamelCase) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.' ) parser.add_argument( '--reset_position_index_per_cell', default=False, action='store_true', help='Whether to use relative position embeddings or not. Defaults to True.', ) parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--tapas_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained TAPAS model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) __magic_name__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
style_context_codestyle: 665
label: 0
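
The UMT5 config in the row above derives its dense-layer activation from a feed_forward_proj string that is either "{ACT_FN}" or "gated-{ACT_FN}". A standalone sketch of that parsing; the function name is mine, but the validation condition mirrors the sample:

def parse_feed_forward_proj(value: str):
    parts = value.split("-")
    act_fn = parts[-1]
    is_gated = parts[0] == "gated"
    if (len(parts) > 1 and parts[0] != "gated") or len(parts) > 2:
        raise ValueError(f"{value!r} is not of the form 'gated-{{ACT_FN}}' or '{{ACT_FN}}'")
    if value == "gated-gelu":
        act_fn = "gelu_new"  # the config special-cases gated-gelu onto the gelu_new kernel
    return act_fn, is_gated

print(parse_feed_forward_proj("gated-gelu"))  # ('gelu_new', True)
print(parse_feed_forward_proj("relu"))        # ('relu', False)
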
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class _UpperCamelCase ( __A ): '''simple docstring''' lowerCamelCase__ =42 class _UpperCamelCase ( __A , __A ): '''simple docstring''' @register_to_config def __init__( self : Any , a : int = 32 , a : int = 64 , a : int = 20 , a : int = 768 , a : int=77 , a : str=4 , a : float = 0.0 , a : str = "silu" , a : Optional[str] = None , a : Optional[str] = None , a : Optional[str] = "linear" , a : Optional[str] = "prd" , a : Optional[int] = None , a : Optional[int] = None , a : Optional[int] = None , ) -> int: """simple docstring""" super().__init__() SCREAMING_SNAKE_CASE : int = num_attention_heads SCREAMING_SNAKE_CASE : Optional[Any] = attention_head_dim SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads * attention_head_dim SCREAMING_SNAKE_CASE : Any = additional_embeddings SCREAMING_SNAKE_CASE : int = time_embed_dim or inner_dim SCREAMING_SNAKE_CASE : List[Any] = embedding_proj_dim or embedding_dim SCREAMING_SNAKE_CASE : Any = clip_embed_dim or embedding_dim SCREAMING_SNAKE_CASE : Any = Timesteps(a , a , 0 ) SCREAMING_SNAKE_CASE : Dict = TimestepEmbedding(a , a , out_dim=a , act_fn=a ) SCREAMING_SNAKE_CASE : str = nn.Linear(a , a ) if embedding_proj_norm_type is None: SCREAMING_SNAKE_CASE : Dict = None elif embedding_proj_norm_type == "layer": SCREAMING_SNAKE_CASE : Tuple = nn.LayerNorm(a ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) SCREAMING_SNAKE_CASE : str = nn.Linear(a , a ) if encoder_hid_proj_type is None: SCREAMING_SNAKE_CASE : Tuple = None elif encoder_hid_proj_type == "linear": SCREAMING_SNAKE_CASE : Any = nn.Linear(a , a ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) SCREAMING_SNAKE_CASE : Any = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , a ) ) if added_emb_type == "prd": SCREAMING_SNAKE_CASE : Tuple = nn.Parameter(torch.zeros(1 , 1 , a ) ) elif added_emb_type is None: SCREAMING_SNAKE_CASE : Union[str, Any] = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`." ) SCREAMING_SNAKE_CASE : Dict = nn.ModuleList( [ BasicTransformerBlock( a , a , a , dropout=a , activation_fn="gelu" , attention_bias=a , ) for d in range(a ) ] ) if norm_in_type == "layer": SCREAMING_SNAKE_CASE : Union[str, Any] = nn.LayerNorm(a ) elif norm_in_type is None: SCREAMING_SNAKE_CASE : Optional[int] = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." ) SCREAMING_SNAKE_CASE : Optional[Any] = nn.LayerNorm(a ) SCREAMING_SNAKE_CASE : Tuple = nn.Linear(a , a ) SCREAMING_SNAKE_CASE : Tuple = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0000.0 ) causal_attention_mask.triu_(1 ) SCREAMING_SNAKE_CASE : Dict = causal_attention_mask[None, ...] 
self.register_buffer("causal_attention_mask" , a , persistent=a ) SCREAMING_SNAKE_CASE : List[Any] = nn.Parameter(torch.zeros(1 , a ) ) SCREAMING_SNAKE_CASE : Union[str, Any] = nn.Parameter(torch.zeros(1 , a ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __UpperCamelCase ( self : Optional[Any] ) -> Dict[str, AttentionProcessor]: """simple docstring""" SCREAMING_SNAKE_CASE : Optional[int] = {} def fn_recursive_add_processors(a : str , a : torch.nn.Module , a : Dict[str, AttentionProcessor] ): if hasattr(a , "set_processor" ): SCREAMING_SNAKE_CASE : int = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , a , a ) return processors for name, module in self.named_children(): fn_recursive_add_processors(a , a , a ) return processors def __UpperCamelCase ( self : Tuple , a : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : Union[str, Any] = len(self.attn_processors.keys() ) if isinstance(a , a ) and len(a ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(a )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(a : str , a : torch.nn.Module , a : int ): if hasattr(a , "set_processor" ): if not isinstance(a , a ): module.set_processor(a ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , a , a ) for name, module in self.named_children(): fn_recursive_attn_processor(a , a , a ) def __UpperCamelCase ( self : List[str] ) -> str: """simple docstring""" self.set_attn_processor(AttnProcessor() ) def __UpperCamelCase ( self : Optional[Any] , a : Union[str, Any] , a : Union[torch.Tensor, float, int] , a : torch.FloatTensor , a : Optional[torch.FloatTensor] = None , a : Optional[torch.BoolTensor] = None , a : bool = True , ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE : Tuple = hidden_states.shape[0] SCREAMING_SNAKE_CASE : Any = timestep if not torch.is_tensor(a ): SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(a ) and len(timesteps.shape ) == 0: SCREAMING_SNAKE_CASE : List[str] = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML SCREAMING_SNAKE_CASE : Optional[int] = timesteps * torch.ones(a , dtype=timesteps.dtype , device=timesteps.device ) SCREAMING_SNAKE_CASE : Tuple = self.time_proj(a ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
SCREAMING_SNAKE_CASE : List[Any] = timesteps_projected.to(dtype=self.dtype ) SCREAMING_SNAKE_CASE : Union[str, Any] = self.time_embedding(a ) if self.embedding_proj_norm is not None: SCREAMING_SNAKE_CASE : List[Any] = self.embedding_proj_norm(a ) SCREAMING_SNAKE_CASE : str = self.embedding_proj(a ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: SCREAMING_SNAKE_CASE : Optional[int] = self.encoder_hidden_states_proj(a ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" ) SCREAMING_SNAKE_CASE : List[Any] = self.proj_in(a ) SCREAMING_SNAKE_CASE : Optional[Any] = self.positional_embedding.to(hidden_states.dtype ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] SCREAMING_SNAKE_CASE : Optional[int] = 0 if encoder_hidden_states is not None: additional_embeds.append(a ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: SCREAMING_SNAKE_CASE : Optional[int] = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: SCREAMING_SNAKE_CASE : List[Any] = hidden_states[:, None, :] SCREAMING_SNAKE_CASE : Optional[Any] = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: SCREAMING_SNAKE_CASE : Any = self.prd_embedding.to(hidden_states.dtype ).expand(a , -1 , -1 ) additional_embeds.append(a ) SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat( a , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens SCREAMING_SNAKE_CASE : str = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: SCREAMING_SNAKE_CASE : Any = F.pad( a , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) SCREAMING_SNAKE_CASE : List[Any] = hidden_states + positional_embeddings if attention_mask is not None: SCREAMING_SNAKE_CASE : int = (1 - attention_mask.to(hidden_states.dtype )) * -1_0000.0 SCREAMING_SNAKE_CASE : Optional[int] = F.pad(a , (0, self.additional_embeddings) , value=0.0 ) SCREAMING_SNAKE_CASE : Optional[int] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) SCREAMING_SNAKE_CASE : Optional[int] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: SCREAMING_SNAKE_CASE : Union[str, Any] = self.norm_in(a ) for block in self.transformer_blocks: SCREAMING_SNAKE_CASE : str = block(a , attention_mask=a ) SCREAMING_SNAKE_CASE : List[str] = self.norm_out(a ) if self.prd_embedding is not None: SCREAMING_SNAKE_CASE : Optional[Any] = hidden_states[:, -1] else: SCREAMING_SNAKE_CASE : Any = hidden_states[:, additional_embeddings_len:] SCREAMING_SNAKE_CASE : Union[str, Any] = self.proj_to_clip_embeddings(a ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=a ) def __UpperCamelCase ( self : str , a : Optional[Any] ) -> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE : str = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
code_codestyle: 25
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""vqvae"""] def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,): '''simple docstring''' super().__init__() self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a ) def _a ( self : str ): '''simple docstring''' return 50 if isinstance(self.scheduler ,_a ) else 1000 @torch.no_grad() def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,): '''simple docstring''' A_ : List[str] = steps or self.get_default_steps() self.scheduler.set_timesteps(_a ) A_ : Union[str, Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: A_ : int = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=_a ,device=self.device ,) A_ : List[Any] = noise A_ : str = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_a ,_a ) A_ : Any = self.mel.audio_slice_to_image(_a ) A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) A_ : Optional[Any] = (input_image / 255) * 2 - 1 A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample( generator=_a )[0] A_ : List[str] = self.vqvae.config.scaling_factor * input_images if start_step > 0: A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] ) A_ : Tuple = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) A_ : Tuple = int(mask_start_secs * pixels_per_second ) A_ : str = int(mask_end_secs * pixels_per_second ) A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet ,_a ): A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""] else: A_ : List[Any] = self.unet(_a ,_a )["""sample"""] if isinstance(self.scheduler ,_a ): A_ : Dict = self.scheduler.step( model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""] else: A_ : Any = self.scheduler.step( model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""] if mask is not None: if mask_start > 0: A_ : Tuple = mask[:, step, :, :mask_start] if mask_end > 0: A_ : List[str] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance A_ : str = 1 / self.vqvae.config.scaling_factor * images 
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""] A_ : int = (images / 2 + 0.5).clamp(0 ,1 ) A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() A_ : Optional[int] = (images * 255).round().astype("""uint8""" ) A_ : List[Any] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) ) A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) ) @torch.no_grad() def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler ,_a ) self.scheduler.set_timesteps(_a ) A_ : Optional[Any] = np.array( [np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) A_ : List[str] = (sample / 255) * 2 - 1 A_ : Optional[int] = torch.Tensor(_a ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps A_ : Any = self.scheduler.alphas_cumprod[t] A_ : List[Any] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) A_ : str = 1 - alpha_prod_t A_ : List[str] = self.unet(_a ,_a )["""sample"""] A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ): '''simple docstring''' A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) ) return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
style_context_codestyle: 665
label: 0
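
The prior transformer in the row above registers its causal mask as an additive buffer: a strictly upper-triangular block of -10000 that suppresses attention to future positions once added to the raw scores. A tiny demonstration of why that works:

import torch

seq_len = 5
causal_attention_mask = torch.full((seq_len, seq_len), -10000.0).triu_(1)[None, ...]
scores = torch.zeros(1, seq_len, seq_len) + causal_attention_mask  # pretend these are attention logits
print(scores.softmax(dim=-1)[0, 0])  # token 0 attends only to itself: ~[1, 0, 0, 0, 0]

After the softmax, exp(-10000) underflows to zero, so each row of the attention matrix only distributes weight over positions at or before its own index.
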
'''simple docstring''' import importlib.metadata import warnings from copy import deepcopy from packaging import version from ..utils import logging from .import_utils import is_accelerate_available, is_bitsandbytes_available if is_bitsandbytes_available(): import bitsandbytes as bnb import torch import torch.nn as nn from ..pytorch_utils import ConvaD if is_accelerate_available(): from accelerate import init_empty_weights from accelerate.utils import find_tied_parameters __UpperCamelCase = logging.get_logger(__name__) def _a ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ) -> List[Any]: """simple docstring""" if "." in tensor_name: __snake_case : List[str] = tensor_name.split(""".""" ) for split in splits[:-1]: __snake_case : Optional[int] = getattr(_lowerCamelCase , _lowerCamelCase ) if new_module is None: raise ValueError(F'''{module} has no attribute {split}.''' ) __snake_case : str = new_module __snake_case : str = splits[-1] if tensor_name not in module._parameters and tensor_name not in module._buffers: raise ValueError(F'''{module} does not have a parameter or a buffer named {tensor_name}.''' ) __snake_case : Union[str, Any] = tensor_name in module._buffers __snake_case : Union[str, Any] = getattr(_lowerCamelCase , _lowerCamelCase ) if old_value.device == torch.device("""meta""" ) and device not in ["meta", torch.device("""meta""" )] and value is None: raise ValueError(F'''{tensor_name} is on the meta device, we need a `value` to put in on {device}.''' ) __snake_case : Union[str, Any] = False __snake_case : int = False if is_buffer or not is_bitsandbytes_available(): __snake_case : int = False __snake_case : Dict = False else: __snake_case : int = hasattr(bnb.nn , """Params4bit""" ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit ) __snake_case : Any = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams ) if is_abit or is_abit: __snake_case : Union[str, Any] = module._parameters[tensor_name] if param.device.type != "cuda": if value is None: __snake_case : Dict = old_value.to(_lowerCamelCase ) elif isinstance(_lowerCamelCase , torch.Tensor ): __snake_case : Tuple = value.to("""cpu""" ) if value.dtype == torch.inta: __snake_case : Tuple = version.parse(importlib.metadata.version("""bitsandbytes""" ) ) > version.parse( """0.37.2""" ) if not is_abit_serializable: raise ValueError( """Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """ """Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""" ) else: __snake_case : Union[str, Any] = torch.tensor(_lowerCamelCase , device="""cpu""" ) # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization. # Since weights are saved in the correct "orientation", we skip transposing when loading. 
if issubclass(module.source_cls , _lowerCamelCase ) and fpaa_statistics is None: __snake_case : Optional[int] = new_value.T __snake_case : str = old_value.__dict__ if is_abit: __snake_case : Tuple = bnb.nn.IntaParams(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase ) elif is_abit: __snake_case : int = bnb.nn.Paramsabit(_lowerCamelCase , requires_grad=_lowerCamelCase , **_lowerCamelCase ).to(_lowerCamelCase ) __snake_case : Tuple = new_value if fpaa_statistics is not None: setattr(module.weight , """SCB""" , fpaa_statistics.to(_lowerCamelCase ) ) else: if value is None: __snake_case : List[Any] = old_value.to(_lowerCamelCase ) elif isinstance(_lowerCamelCase , torch.Tensor ): __snake_case : Dict = value.to(_lowerCamelCase ) else: __snake_case : List[Any] = torch.tensor(_lowerCamelCase , device=_lowerCamelCase ) if is_buffer: __snake_case : Optional[int] = new_value else: __snake_case : Tuple = nn.Parameter(_lowerCamelCase , requires_grad=old_value.requires_grad ) __snake_case : str = new_value def _a ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=False ) -> Optional[Any]: """simple docstring""" for name, module in model.named_children(): if current_key_name is None: __snake_case : Tuple = [] current_key_name.append(_lowerCamelCase ) if (isinstance(_lowerCamelCase , nn.Linear ) or isinstance(_lowerCamelCase , _lowerCamelCase )) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` if not any(key in """.""".join(_lowerCamelCase ) for key in modules_to_not_convert ): with init_empty_weights(): if isinstance(_lowerCamelCase , _lowerCamelCase ): __snake_case , __snake_case : Union[str, Any] = module.weight.shape else: __snake_case : Dict = module.in_features __snake_case : Any = module.out_features if quantization_config.quantization_method() == "llm_int8": __snake_case : Union[str, Any] = bnb.nn.LinearabitLt( _lowerCamelCase , _lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , ) __snake_case : Any = True else: if ( quantization_config.llm_inta_skip_modules is not None and name in quantization_config.llm_inta_skip_modules ): pass else: __snake_case : Any = bnb.nn.Linearabit( _lowerCamelCase , _lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , ) __snake_case : Optional[int] = True # Store the module class in case we need to transpose the weight later __snake_case : List[Any] = type(_lowerCamelCase ) # Force requires grad to False to avoid unexpected errors model._modules[name].requires_grad_(_lowerCamelCase ) if len(list(module.children() ) ) > 0: __snake_case , __snake_case : Optional[Any] = _replace_with_bnb_linear( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , has_been_replaced=_lowerCamelCase , ) # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _a ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None ) -> str: """simple docstring""" __snake_case : Dict = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert __snake_case , __snake_case : int = _replace_with_bnb_linear( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) if not 
has_been_replaced: logger.warning( """You are loading your model in 8bit or 4bit but no linear modules were found in your model.""" """ Please double check your model architecture, or submit an issue on github if you think this is""" """ a bug.""" ) return model def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> List[str]: """simple docstring""" warnings.warn( """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , _lowerCamelCase , ) return replace_with_bnb_linear(*_lowerCamelCase , **_lowerCamelCase ) def _a ( *_lowerCamelCase , **_lowerCamelCase ) -> str: """simple docstring""" warnings.warn( """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , _lowerCamelCase , ) return set_module_quantized_tensor_to_device(*_lowerCamelCase , **_lowerCamelCase ) def _a ( _lowerCamelCase ) -> Any: """simple docstring""" __snake_case : Tuple = deepcopy(_lowerCamelCase ) # this has 0 cost since it is done inside `init_empty_weights` context manager` tied_model.tie_weights() __snake_case : int = find_tied_parameters(_lowerCamelCase ) # For compatibility with Accelerate < 0.18 if isinstance(_lowerCamelCase , _lowerCamelCase ): __snake_case : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: __snake_case : Dict = sum(_lowerCamelCase , [] ) __snake_case : int = len(_lowerCamelCase ) > 0 # Check if it is a base model __snake_case : Tuple = not hasattr(_lowerCamelCase , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head __snake_case : Union[str, Any] = list(model.named_children() ) __snake_case : List[Any] = [list_modules[-1][0]] # add last module together with tied weights __snake_case : Tuple = set(_lowerCamelCase ) - set(_lowerCamelCase ) __snake_case : str = list(set(_lowerCamelCase ) ) + list(_lowerCamelCase ) # remove ".weight" from the keys __snake_case : List[Any] = [""".weight""", """.bias"""] __snake_case : Union[str, Any] = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: __snake_case : Optional[int] = name.replace(_lowerCamelCase , """""" ) filtered_module_names.append(_lowerCamelCase ) return filtered_module_names
26
'''simple docstring''' import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __magic_name__ = 16 __magic_name__ = 32 def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16): A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""") A_ : str = load_dataset("""glue""" , """mrpc""") def tokenize_function(lowerCamelCase : Dict): # max_length=None => use the model max length (it's actually the default) A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Tuple = datasets.map( lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""") def collate_fn(lowerCamelCase : Tuple): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : List[Any] = 16 elif accelerator.mixed_precision != "no": A_ : Any = 8 else: A_ : Tuple = None return tokenizer.pad( lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
A_ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase) A_ : str = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , ) return train_dataloader, eval_dataloader def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict): # Initialize accelerator A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : List[Any] = config["""lr"""] A_ : List[Any] = int(config["""num_epochs"""]) A_ : int = int(config["""seed"""]) A_ : Dict = int(config["""batch_size"""]) A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""") # If the batch size is too big we use gradient accumulation A_ : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : Any = batch_size // MAX_GPU_BATCH_SIZE A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase) A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : str = model.to(accelerator.device) # Instantiate optimizer A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase) # Instantiate scheduler A_ : Tuple = get_linear_schedule_with_warmup( optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) # Now we train the model for epoch in range(lowerCamelCase): model.train() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) A_ : Optional[int] = model(**lowerCamelCase) A_ : List[Any] = outputs.loss A_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): A_ : Union[str, Any] = model(**lowerCamelCase) A_ : Any = outputs.logits.argmax(dim=-1) A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""])) metric.add_batch( predictions=lowerCamelCase , references=lowerCamelCase , ) A_ : int = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(F'epoch {epoch}:' , lowerCamelCase) def lowerCamelCase ( ): A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""") parser.add_argument( """--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""") A_ : Dict = parser.parse_args() A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase , lowerCamelCase) if __name__ == "__main__": main()
665
0
import json import os from functools import lru_cache from typing import TYPE_CHECKING, List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __A : Any = logging.get_logger(__name__) __A : List[str] = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __A : List[str] = { "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __A : Union[str, Any] = {"facebook/blenderbot-3B": 128} @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def __lowerCAmelCase( ) -> List[str]: """simple docstring""" _A = ( list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) _A = bs[:] _A = 0 for b in range(2**8 ): if b not in bs: bs.append(_SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 _A = [chr(_SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) def __lowerCAmelCase( _SCREAMING_SNAKE_CASE ) -> List[str]: """simple docstring""" _A = set() _A = word[0] for char in word[1:]: pairs.add((prev_char, char) ) _A = char return pairs class lowerCamelCase( __snake_case ): '''simple docstring''' __magic_name__ = VOCAB_FILES_NAMES __magic_name__ = PRETRAINED_VOCAB_FILES_MAP __magic_name__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __magic_name__ = ['input_ids', 'attention_mask'] def __init__( self , snake_case_ , snake_case_ , snake_case_="replace" , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=False , **snake_case_ , ): _A = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else bos_token _A = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else eos_token _A = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else sep_token _A = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else cls_token _A = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else unk_token _A = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it _A = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token super().__init__( errors=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , ) with open(snake_case_ , encoding='utf-8' ) as vocab_handle: _A = json.load(snake_case_ ) _A = {v: k for k, v in self.encoder.items()} _A = errors # how to handle errors in decoding _A = bytes_to_unicode() _A = {v: k for k, v in self.byte_encoder.items()} with open(snake_case_ , encoding='utf-8' ) as merges_handle: _A = merges_handle.read().split('\n' )[1:-1] _A = [tuple(merge.split() ) for merge in bpe_merges] _A = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) ) _A = {} _A = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions _A = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot def lowerCAmelCase__ ( self ): return len(self.encoder ) def lowerCAmelCase__ ( self ): return dict(self.encoder , **self.added_tokens_encoder ) def lowerCAmelCase__ ( self , snake_case_ ): if token in self.cache: return self.cache[token] _A = tuple(snake_case_ ) _A = get_pairs(snake_case_ ) if not pairs: return token while True: _A = min(snake_case_ , key=lambda snake_case_ : self.bpe_ranks.get(snake_case_ , float('inf' ) ) ) if bigram not in self.bpe_ranks: break _A, _A = bigram _A = [] _A = 0 while i < len(snake_case_ ): try: _A = word.index(snake_case_ , snake_case_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) _A = j if word[i] == first and i < len(snake_case_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 _A = tuple(snake_case_ ) _A = new_word if len(snake_case_ ) == 1: break else: _A = get_pairs(snake_case_ ) _A = ' '.join(snake_case_ ) _A = word return word def lowerCAmelCase__ ( self , snake_case_ ): _A = [] for token in re.findall(self.pat , snake_case_ ): _A = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(snake_case_ ).split(' ' ) ) return bpe_tokens def lowerCAmelCase__ ( self , snake_case_ ): return self.encoder.get(snake_case_ , self.encoder.get(self.unk_token ) ) def lowerCAmelCase__ ( self , snake_case_ ): return self.decoder.get(snake_case_ ) def lowerCAmelCase__ ( self , snake_case_ ): _A = ''.join(snake_case_ ) _A = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors ) return text def lowerCAmelCase__ ( self , snake_case_ , snake_case_ = None ): if not os.path.isdir(snake_case_ ): logger.error(F"Vocabulary path ({save_directory}) should be a directory" ) return _A = os.path.join( snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) _A = os.path.join( snake_case_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(snake_case_ , 'w' , encoding='utf-8' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case_ , 
ensure_ascii=snake_case_ ) + '\n' ) _A = 0 with open(snake_case_ , 'w' , encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ): if index != token_index: logger.warning( F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." ' Please check that the tokenizer is not corrupted!' ) _A = token_index writer.write(' '.join(snake_case_ ) + '\n' ) index += 1 return vocab_file, merge_file def lowerCAmelCase__ ( self , snake_case_ , snake_case_ = None , snake_case_ = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ ) if token_ids_a is None: return [1] + ([0] * len(snake_case_ )) + [1] return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1] def lowerCAmelCase__ ( self , snake_case_ , snake_case_ = None ): _A = [self.sep_token_id] _A = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCAmelCase__ ( self , snake_case_ , snake_case_=False , **snake_case_ ): _A = kwargs.pop('add_prefix_space' , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(snake_case_ ) > 0 and not text[0].isspace()): _A = ' ' + text return (text, kwargs) def lowerCAmelCase__ ( self , snake_case_ , snake_case_ = None ): return token_ids_a + [self.eos_token_id] def lowerCAmelCase__ ( self , snake_case_ ): _A = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(snake_case_ ) _A = ' '.join(snake_case_ ) _A = self.encode(snake_case_ ) if len(snake_case_ ) > self.model_max_length: _A = input_ids[-self.model_max_length :] logger.warning(F"Trimmed input from conversation as it was longer than {self.model_max_length} tokens." ) return input_ids
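# Usage sketch for the tokenizer above (assumes `transformers` is installed and the
# "facebook/blenderbot-3B" checkpoint is reachable; files are downloaded on first call).
from transformers import BlenderbotTokenizer

tok = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
ids = tok(" Hello, how are you?").input_ids  # user turns are space-prefixed, as noted above
print(tok.decode(ids))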
27
'''simple docstring''' import functools def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]): # Validation if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days): raise ValueError("""The parameter days should be a list of integers""") if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs): raise ValueError("""The parameter costs should be a list of three integers""") if len(lowerCamelCase) == 0: return 0 if min(lowerCamelCase) <= 0: raise ValueError("""All days elements should be greater than 0""") if max(lowerCamelCase) >= 366: raise ValueError("""All days elements should be less than 366""") A_ : Tuple = set(lowerCamelCase) @functools.cache def dynamic_programming(lowerCamelCase : int) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1) return min( costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , ) return dynamic_programming(1) if __name__ == "__main__": import doctest doctest.testmod()
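# A de-obfuscated sketch of the same top-down DP, for readability; semantics follow the
# validation above (travel days in [1, 365], costs = [1-day, 7-day, 30-day] passes).
import functools

def min_ticket_cost(days: list, costs: list) -> int:
    day_set = set(days)

    @functools.cache
    def best(day: int) -> int:
        if day > 365:
            return 0
        if day not in day_set:
            return best(day + 1)  # no travel: move on for free
        return min(
            costs[0] + best(day + 1),   # 1-day pass
            costs[1] + best(day + 7),   # 7-day pass
            costs[2] + best(day + 30),  # 30-day pass
        )

    return best(1)

assert min_ticket_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11  # LeetCode 983's example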
665
0
'''simple docstring''' from datetime import datetime import requests from bsa import BeautifulSoup if __name__ == "__main__": UpperCamelCase_ = input("Enter image url: ").strip() print(F"""Downloading image from {url} ...""") UpperCamelCase_ = BeautifulSoup(requests.get(url).content, "html.parser") # The image URL is in the content field of the first meta tag with property og:image UpperCamelCase_ = soup.find("meta", {"property": "og:image"})["content"] UpperCamelCase_ = requests.get(image_url).content UpperCamelCase_ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg""" with open(file_name, "wb") as fp: fp.write(image_data) print(F"""Done. Image saved to disk as {file_name}.""")
28
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def lowerCamelCase ( lowerCamelCase : NDArray[floataa] , lowerCamelCase : NDArray[floataa] , lowerCamelCase : list[int] , lowerCamelCase : int , ): A_ , A_ : int = coefficient_matrix.shape A_ , A_ : Union[str, Any] = constant_matrix.shape if rowsa != colsa: A_ : Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}' raise ValueError(lowerCamelCase) if colsa != 1: A_ : Tuple = F'Constant matrix must be nx1 but received {rowsa}x{colsa}' raise ValueError(lowerCamelCase) if rowsa != rowsa: A_ : Dict = ( """Coefficient and constant matrices dimensions must be nxn and nx1 but """ F'received {rowsa}x{colsa} and {rowsa}x{colsa}' ) raise ValueError(lowerCamelCase) if len(lowerCamelCase) != rowsa: A_ : Union[str, Any] = ( """Number of initial values must be equal to number of rows in coefficient """ F'matrix but received {len(lowerCamelCase)} and {rowsa}' ) raise ValueError(lowerCamelCase) if iterations <= 0: raise ValueError("""Iterations must be at least 1""") A_ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1) A_ , A_ : int = table.shape strictly_diagonally_dominant(lowerCamelCase) # Iterates the whole matrix for given number of times for _ in range(lowerCamelCase): A_ : List[Any] = [] for row in range(lowerCamelCase): A_ : int = 0 for col in range(lowerCamelCase): if col == row: A_ : List[str] = table[row][col] elif col == cols - 1: A_ : str = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] A_ : Union[str, Any] = (temp + val) / denom new_val.append(lowerCamelCase) A_ : Tuple = new_val return [float(lowerCamelCase) for i in new_val] def lowerCamelCase ( lowerCamelCase : NDArray[floataa]): A_ , A_ : Dict = table.shape A_ : Union[str, Any] = True for i in range(0 , lowerCamelCase): A_ : str = 0 for j in range(0 , cols - 1): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("""Coefficient matrix is not strictly diagonally dominant""") return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
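# Usage sketch for the Jacobi iteration above, run on a small strictly diagonally dominant
# system: 4x + y = 2 and x + 5y = -6, whose exact solution is x = 16/19, y = -26/19.
import numpy as np

coefficients = np.array([[4.0, 1.0], [1.0, 5.0]])
constants = np.array([2.0, -6.0])
x = np.zeros(2)
off_diagonal = coefficients - np.diag(np.diag(coefficients))
for _ in range(50):  # fixed iteration count, mirroring the loop above
    x = (constants - off_diagonal @ x) / np.diag(coefficients)
print(x)  # -> approximately [0.8421, -1.3684]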
665
0
"""simple docstring""" import re def lowercase ( lowerCAmelCase__ ): lowerCamelCase_ = re.compile( R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' ) return bool(re.search(lowerCAmelCase__ ,lowerCAmelCase__ ) ) if __name__ == "__main__": A_ = """0094702343221""" print(is_sri_lankan_phone_number(phone))
29
'''simple docstring''' def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str): A_ : Any = len(lowerCamelCase) A_ : Optional[Any] = len(lowerCamelCase) A_ : Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)] A_ : Union[str, Any] = True for i in range(lowerCamelCase): for j in range(m + 1): if dp[i][j]: if j < m and a[i].upper() == b[j]: A_ : Optional[int] = True if a[i].islower(): A_ : List[Any] = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
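# A de-obfuscated usage sketch of the DP above (HackerRank-style "Abbreviation": capitalize
# some lowercase letters of `a`, delete the remaining lowercase ones, and try to obtain `b`).
def abbr(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # capitalize (or keep) a[i] to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True      # delete the lowercase character
    return dp[n][m]

assert abbr("daBcd", "ABC")      # capitalize 'a' and 'c', drop the two 'd's
assert not abbr("dBcd", "ABC")   # no way to produce the leading 'A'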
665
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __a = { 'configuration_swiftformer': [ 'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwiftFormerConfig', 'SwiftFormerOnnxConfig', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __a = [ 'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwiftFormerForImageClassification', 'SwiftFormerModel', 'SwiftFormerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
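# Illustration (not from the original): roughly the deferred-import idea behind _LazyModule,
# expressed with a module-level __getattr__ (PEP 562) inside a package __init__.py.
import importlib

_import_structure = {"configuration_swiftformer": ["SwiftFormerConfig"]}

def __getattr__(name):
    for module_name, names in _import_structure.items():
        if name in names:
            submodule = importlib.import_module("." + module_name, __name__)
            return getattr(submodule, name)  # the import happens only on first attribute access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")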
30
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class __lowerCAmelCase : '''simple docstring''' a_ = 42 a_ = 42 class __lowerCAmelCase : '''simple docstring''' def __init__( self : Union[str, Any] ,_a : int ): '''simple docstring''' A_ : list[list[Edge]] = [[] for _ in range(_a )] A_ : List[Any] = size def __getitem__( self : int ,_a : int ): '''simple docstring''' return iter(self._graph[vertex] ) @property def _a ( self : str ): '''simple docstring''' return self._size def _a ( self : str ,_a : int ,_a : int ,_a : int ): '''simple docstring''' if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(_a ,_a ) ) def _a ( self : Dict ,_a : int ,_a : int ): '''simple docstring''' A_ : Tuple = deque([start_vertex] ) A_ : list[int | None] = [None] * self.size A_ : Union[str, Any] = 0 while queue: A_ : List[Any] = queue.popleft() A_ : Tuple = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: A_ : Union[str, Any] = current_distance + edge.weight A_ : Optional[Any] = distances[edge.destination_vertex] if ( isinstance(_a ,_a ) and new_distance >= dest_vertex_distance ): continue A_ : Tuple = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
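# A compact, self-contained 0-1 BFS over a plain adjacency list, mirroring the deque trick
# above: weight-0 edges go to the front of the queue, weight-1 edges to the back.
from collections import deque

def zero_one_bfs(adj, start):
    dist = [float("inf")] * len(adj)
    dist[start] = 0
    dq = deque([start])
    while dq:
        u = dq.popleft()
        for v, w in adj[u]:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                dq.appendleft(v) if w == 0 else dq.append(v)
    return dist

graph = [[(1, 0), (3, 1)], [(2, 1)], [], [(2, 0)]]  # 4 vertices, edges as (target, weight)
assert zero_one_bfs(graph, 0)[2] == 1  # 0 -0-> 1 -1-> 2, or 0 -1-> 3 -0-> 2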
665
0
import os import time import numpy as np import onnxruntime as ort lowerCamelCase__ : int = '1' lowerCamelCase__ : Optional[int] = '0' lowerCamelCase__ : Optional[Any] = '1' lowerCamelCase__ : int = ort.SessionOptions() lowerCamelCase__ : List[Any] = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print('Create inference session...') lowerCamelCase__ : List[str] = ['TensorrtExecutionProvider', 'CUDAExecutionProvider'] lowerCamelCase__ : List[str] = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider) lowerCamelCase__ : Union[str, Any] = ort.RunOptions() lowerCamelCase__ : int = 128 lowerCamelCase__ : Dict = 1 lowerCamelCase__ : Tuple = np.ones((batch, sequence), dtype=np.intaa) lowerCamelCase__ : Union[str, Any] = np.ones((batch, sequence), dtype=np.intaa) lowerCamelCase__ : Any = np.ones((batch, sequence), dtype=np.intaa) print('Warm up phase...') sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Start inference...') lowerCamelCase__ : str = time.time() lowerCamelCase__ : int = 2_000 lowerCamelCase__ : Any = {} for iter in range(max_iters): lowerCamelCase__ : str = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters))
31
'''simple docstring''' def lowerCamelCase ( lowerCamelCase : int = 10**9): A_ : Optional[int] = 1 A_ : int = 2 A_ : List[Any] = 0 A_ : Optional[Any] = 0 A_ : str = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value A_ : Optional[Any] = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(f"""{solution() = }""")
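# A brute-force cross-check (not part of the original) for the recurrence above, which sums
# perimeters of "almost equilateral" Heronian triangles (sides a, a, a±1 with integral area,
# Project Euler 94). Heron's formula gives 16*area^2 = p(p-2a)^2(p-2c), so the area is an
# integer exactly when that quantity is a perfect square whose root is divisible by 4.
from math import isqrt

def brute_force(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):           # the "almost" side
            p = 2 * a + c
            if c <= 0 or p > max_perimeter:
                continue
            sq = p * (p - 2 * a) * (p - 2 * a) * (p - 2 * c)
            if sq > 0 and isqrt(sq) ** 2 == sq and isqrt(sq) % 4 == 0:
                total += p
    return total

assert brute_force(100) == 16 + 50  # triangles (5, 5, 6) and (17, 17, 16)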
665
0
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all feature extractors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...feature_extraction_utils import FeatureExtractionMixin from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) UpperCAmelCase_ = logging.get_logger(__name__) UpperCAmelCase_ = OrderedDict( [ ("audio-spectrogram-transformer", "ASTFeatureExtractor"), ("beit", "BeitFeatureExtractor"), ("chinese_clip", "ChineseCLIPFeatureExtractor"), ("clap", "ClapFeatureExtractor"), ("clip", "CLIPFeatureExtractor"), ("clipseg", "ViTFeatureExtractor"), ("conditional_detr", "ConditionalDetrFeatureExtractor"), ("convnext", "ConvNextFeatureExtractor"), ("cvt", "ConvNextFeatureExtractor"), ("data2vec-audio", "Wav2Vec2FeatureExtractor"), ("data2vec-vision", "BeitFeatureExtractor"), ("deformable_detr", "DeformableDetrFeatureExtractor"), ("deit", "DeiTFeatureExtractor"), ("detr", "DetrFeatureExtractor"), ("dinat", "ViTFeatureExtractor"), ("donut-swin", "DonutFeatureExtractor"), ("dpt", "DPTFeatureExtractor"), ("encodec", "EncodecFeatureExtractor"), ("flava", "FlavaFeatureExtractor"), ("glpn", "GLPNFeatureExtractor"), ("groupvit", "CLIPFeatureExtractor"), ("hubert", "Wav2Vec2FeatureExtractor"), ("imagegpt", "ImageGPTFeatureExtractor"), ("layoutlmv2", "LayoutLMv2FeatureExtractor"), ("layoutlmv3", "LayoutLMv3FeatureExtractor"), ("levit", "LevitFeatureExtractor"), ("maskformer", "MaskFormerFeatureExtractor"), ("mctct", "MCTCTFeatureExtractor"), ("mobilenet_v1", "MobileNetV1FeatureExtractor"), ("mobilenet_v2", "MobileNetV2FeatureExtractor"), ("mobilevit", "MobileViTFeatureExtractor"), ("nat", "ViTFeatureExtractor"), ("owlvit", "OwlViTFeatureExtractor"), ("perceiver", "PerceiverFeatureExtractor"), ("poolformer", "PoolFormerFeatureExtractor"), ("regnet", "ConvNextFeatureExtractor"), ("resnet", "ConvNextFeatureExtractor"), ("segformer", "SegformerFeatureExtractor"), ("sew", "Wav2Vec2FeatureExtractor"), ("sew-d", "Wav2Vec2FeatureExtractor"), ("speech_to_text", "Speech2TextFeatureExtractor"), ("speecht5", "SpeechT5FeatureExtractor"), ("swiftformer", "ViTFeatureExtractor"), ("swin", "ViTFeatureExtractor"), ("swinv2", "ViTFeatureExtractor"), ("table-transformer", "DetrFeatureExtractor"), ("timesformer", "VideoMAEFeatureExtractor"), ("tvlt", "TvltFeatureExtractor"), ("unispeech", "Wav2Vec2FeatureExtractor"), ("unispeech-sat", "Wav2Vec2FeatureExtractor"), ("van", "ConvNextFeatureExtractor"), ("videomae", "VideoMAEFeatureExtractor"), ("vilt", "ViltFeatureExtractor"), ("vit", "ViTFeatureExtractor"), ("vit_mae", "ViTFeatureExtractor"), ("vit_msn", "ViTFeatureExtractor"), ("wav2vec2", "Wav2Vec2FeatureExtractor"), ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"), ("wavlm", "Wav2Vec2FeatureExtractor"), ("whisper", "WhisperFeatureExtractor"), ("xclip", "CLIPFeatureExtractor"), ("yolos", "YolosFeatureExtractor"), ] ) UpperCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES) def A__ ( SCREAMING_SNAKE_CASE_ : str ) -> List[Any]: """simple docstring""" for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items(): if class_name in extractors: _UpperCAmelCase = 
model_type_to_module_name(SCREAMING_SNAKE_CASE_ ) _UpperCAmelCase = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' ) try: return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) except AttributeError: continue for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items(): if getattr(SCREAMING_SNAKE_CASE_ , '''__name__''' , SCREAMING_SNAKE_CASE_ ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. _UpperCAmelCase = importlib.import_module('''transformers''' ) if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return None def A__ ( SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE_ : Optional[str] = None , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase = get_file_from_repo( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , ) if resolved_config_file is None: logger.info( '''Could not locate the feature extractor configuration file, will try to use the model config instead.''' ) return {} with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as reader: return json.load(SCREAMING_SNAKE_CASE_ ) class __UpperCamelCase : def __init__( self ): raise EnvironmentError( '''AutoFeatureExtractor is designed to be instantiated ''' '''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(_UpperCamelCase ) def UpperCamelCase( cls , _UpperCamelCase , **_UpperCamelCase ): _UpperCAmelCase = kwargs.pop('''config''' , _UpperCamelCase ) _UpperCAmelCase = kwargs.pop('''trust_remote_code''' , _UpperCamelCase ) _UpperCAmelCase = True _UpperCAmelCase , _UpperCAmelCase = FeatureExtractionMixin.get_feature_extractor_dict(_UpperCamelCase , **_UpperCamelCase ) _UpperCAmelCase = config_dict.get('''feature_extractor_type''' , _UpperCamelCase ) _UpperCAmelCase = None if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): _UpperCAmelCase = config_dict['''auto_map''']['''AutoFeatureExtractor'''] # If we don't find the feature extractor class in the feature extractor config, let's try the model config. 
if feature_extractor_class is None and feature_extractor_auto_map is None: if not isinstance(_UpperCamelCase , _UpperCamelCase ): _UpperCAmelCase = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) # It could be in `config.feature_extractor_type`` _UpperCAmelCase = getattr(_UpperCamelCase , '''feature_extractor_type''' , _UpperCamelCase ) if hasattr(_UpperCamelCase , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map: _UpperCAmelCase = config.auto_map['''AutoFeatureExtractor'''] if feature_extractor_class is not None: _UpperCAmelCase = feature_extractor_class_from_name(_UpperCamelCase ) _UpperCAmelCase = feature_extractor_auto_map is not None _UpperCAmelCase = feature_extractor_class is not None or type(_UpperCamelCase ) in FEATURE_EXTRACTOR_MAPPING _UpperCAmelCase = resolve_trust_remote_code( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if has_remote_code and trust_remote_code: _UpperCAmelCase = get_class_from_dynamic_module( _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) _UpperCAmelCase = kwargs.pop('''code_revision''' , _UpperCamelCase ) if os.path.isdir(_UpperCamelCase ): feature_extractor_class.register_for_auto_class() return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) elif feature_extractor_class is not None: return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) # Last try: we use the FEATURE_EXTRACTOR_MAPPING. elif type(_UpperCamelCase ) in FEATURE_EXTRACTOR_MAPPING: _UpperCAmelCase = FEATURE_EXTRACTOR_MAPPING[type(_UpperCamelCase )] return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase ) raise ValueError( f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a ''' f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following ''' f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' ) @staticmethod def UpperCamelCase( _UpperCamelCase , _UpperCamelCase ): FEATURE_EXTRACTOR_MAPPING.register(_UpperCamelCase , _UpperCamelCase )
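# Usage sketch for the resolver above (assumes `transformers` is installed; the hub config
# is fetched on first call).
from transformers import AutoFeatureExtractor

fe = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(fe).__name__)  # -> Wav2Vec2FeatureExtractor, resolved via the mapping above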
32
'''simple docstring''' # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def lowerCamelCase ( ): A_ : Optional[int] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=lowerCamelCase) A_ : Optional[int] = parser.add_subparsers(help="""accelerate command helpers""") # Register commands get_config_parser(subparsers=lowerCamelCase) env_command_parser(subparsers=lowerCamelCase) launch_command_parser(subparsers=lowerCamelCase) tpu_command_parser(subparsers=lowerCamelCase) test_command_parser(subparsers=lowerCamelCase) # Let's go A_ : Dict = parser.parse_args() if not hasattr(lowerCamelCase , """func"""): parser.print_help() exit(1) # Run args.func(lowerCamelCase) if __name__ == "__main__": main()
665
0
import argparse import torch from torch import nn from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Any: snake_case__ = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(__lowerCAmelCase , __lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> str: snake_case__ , snake_case__ = emb.weight.shape snake_case__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase ) snake_case__ = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE ( __lowerCAmelCase ) -> Dict: snake_case__ = torch.load(__lowerCAmelCase , map_location='''cpu''' ) snake_case__ = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model'''] snake_case__ = mam_aaa['''model'''] remove_ignore_keys_(__lowerCAmelCase ) snake_case__ = state_dict['''encoder.embed_tokens.weight'''].shape[0] snake_case__ = MaMaaaConfig( vocab_size=__lowerCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , ) snake_case__ = state_dict['''decoder.embed_tokens.weight'''] snake_case__ = MaMaaaForConditionalGeneration(__lowerCAmelCase ) model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase ) snake_case__ = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCamelCase__ : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") lowerCamelCase__ : Optional[int] = parser.parse_args() lowerCamelCase__ : List[str] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
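# Example invocation of the conversion script above (script name and paths are illustrative):
#   python convert_m2m100_checkpoint.py /path/to/model.pt ./converted-model
# The two positional arguments correspond to the `fairseq_path` and
# `pytorch_dump_folder_path` parser entries defined in the __main__ block.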
33
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __magic_name__ = { 'configuration_altclip': [ 'ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig', ], 'processing_altclip': ['AltCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'AltCLIPPreTrainedModel', 'AltCLIPModel', 'AltCLIPTextModel', 'AltCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_altclip import ( ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig, ) from .processing_altclip import AltCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_altclip import ( ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, AltCLIPModel, AltCLIPPreTrainedModel, AltCLIPTextModel, AltCLIPVisionModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('3.8'): import importlib_metadata else: import importlib.metadata as importlib_metadata def __snake_case ( _lowercase ,_lowercase=False ): """simple docstring""" try: UpperCamelCase = os.environ[key] except KeyError: # KEY isn't set, default to `default`. UpperCamelCase = default else: # KEY is set, convert it to True or False. try: UpperCamelCase = strtobool(_lowercase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'If set, {key} must be yes or no.' ) return _value SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_SLOW', default=False) SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_REMOTE', default=False) SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_LOCAL', default=True) SCREAMING_SNAKE_CASE_ = parse_flag_from_env('RUN_PACKAGED', default=True) # Compression SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='test requires lz4') SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='test requires py7zr') SCREAMING_SNAKE_CASE_ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='test requires zstandard') # Audio SCREAMING_SNAKE_CASE_ = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('soundfile') is None or version.parse(importlib_metadata.version('soundfile')) < version.parse('0.12.0'), reason='test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ', ) # Beam SCREAMING_SNAKE_CASE_ = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('0.3.2'), reason='test requires apache-beam and a compatible dill version', ) # Dill-cloudpickle compatibility SCREAMING_SNAKE_CASE_ = pytest.mark.skipif( config.DILL_VERSION <= version.parse('0.3.2'), reason='test requires dill>0.3.2 for cloudpickle compatibility', ) # Windows SCREAMING_SNAKE_CASE_ = pytest.mark.skipif( sys.platform == 'win32', reason='test should not be run on Windows', ) def __snake_case ( _lowercase ): """simple docstring""" try: import faiss # noqa except ImportError: UpperCamelCase = unittest.skip('''test requires faiss''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" try: import regex # noqa except ImportError: UpperCamelCase = unittest.skip('''test requires regex''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" try: import elasticsearch # noqa except ImportError: UpperCamelCase = unittest.skip('''test requires elasticsearch''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" try: import sqlalchemy # noqa except ImportError: UpperCamelCase = unittest.skip('''test requires sqlalchemy''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" if not config.TORCH_AVAILABLE: UpperCamelCase = unittest.skip('''test requires PyTorch''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" if not config.TF_AVAILABLE: UpperCamelCase = unittest.skip('''test requires TensorFlow''' )(_lowercase ) return test_case 
def __snake_case ( _lowercase ): """simple docstring""" if not config.JAX_AVAILABLE: UpperCamelCase = unittest.skip('''test requires JAX''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" if not config.PIL_AVAILABLE: UpperCamelCase = unittest.skip('''test requires Pillow''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" try: import transformers # noqa F401 except ImportError: return unittest.skip('''test requires transformers''' )(_lowercase ) else: return test_case def __snake_case ( _lowercase ): """simple docstring""" try: import tiktoken # noqa F401 except ImportError: return unittest.skip('''test requires tiktoken''' )(_lowercase ) else: return test_case def __snake_case ( _lowercase ): """simple docstring""" try: import spacy # noqa F401 except ImportError: return unittest.skip('''test requires spacy''' )(_lowercase ) else: return test_case def __snake_case ( _lowercase ): """simple docstring""" def _require_spacy_model(_lowercase ): try: import spacy # noqa F401 spacy.load(_lowercase ) except ImportError: return unittest.skip('''test requires spacy''' )(_lowercase ) except OSError: return unittest.skip('''test requires spacy model \'{}\''''.format(_lowercase ) )(_lowercase ) else: return test_case return _require_spacy_model def __snake_case ( _lowercase ): """simple docstring""" try: import pyspark # noqa F401 except ImportError: return unittest.skip('''test requires pyspark''' )(_lowercase ) else: return test_case def __snake_case ( _lowercase ): """simple docstring""" try: import joblibspark # noqa F401 except ImportError: return unittest.skip('''test requires joblibspark''' )(_lowercase ) else: return test_case def __snake_case ( _lowercase ): """simple docstring""" if not _run_slow_tests or _run_slow_tests == 0: UpperCamelCase = unittest.skip('''test is slow''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" if not _run_local_tests or _run_local_tests == 0: UpperCamelCase = unittest.skip('''test is local''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" if not _run_packaged_tests or _run_packaged_tests == 0: UpperCamelCase = unittest.skip('''test is packaged''' )(_lowercase ) return test_case def __snake_case ( _lowercase ): """simple docstring""" if not _run_remote_tests or _run_remote_tests == 0: UpperCamelCase = unittest.skip('''test requires remote''' )(_lowercase ) return test_case def __snake_case ( *_lowercase ): """simple docstring""" def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(_lowercase ) and name.startswith('''test''' ): for decorator in decorators: UpperCamelCase = decorator(_lowercase ) setattr(cls ,_lowercase ,_lowercase ) return cls return decorate class snake_case_ ( lowerCamelCase_ ): """simple docstring""" pass class snake_case_ ( lowerCamelCase_ ): """simple docstring""" A_ = 0 A_ = 1 A_ = 2 @contextmanager def __snake_case ( _lowercase=OfflineSimulationMode.CONNECTION_FAILS ,_lowercase=1e-16 ): """simple docstring""" UpperCamelCase = requests.Session().request def timeout_request(_lowercase ,_lowercase ,_lowercase ,**_lowercase ): # Change the url to an invalid url so that the connection hangs UpperCamelCase = '''https://10.255.255.1''' if kwargs.get('''timeout''' ) is None: raise RequestWouldHangIndefinitelyError( f'Tried a call to {url} in offline mode with no timeout set. Please set a timeout.' 
) UpperCamelCase = timeout try: return online_request(_lowercase ,_lowercase ,**_lowercase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier UpperCamelCase = url UpperCamelCase = e.args[0] UpperCamelCase = (max_retry_error.args[0].replace('''10.255.255.1''' ,f'OfflineMock[{url}]' ),) UpperCamelCase = (max_retry_error,) raise def raise_connection_error(_lowercase ,_lowercase ,**_lowercase ): raise requests.ConnectionError('''Offline mode is enabled.''' ,request=_lowercase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('''requests.Session.send''' ,_lowercase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('''requests.Session.request''' ,_lowercase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('''datasets.config.HF_DATASETS_OFFLINE''' ,_lowercase ): yield else: raise ValueError('''Please use a value from the OfflineSimulationMode enum.''' ) @contextmanager def __snake_case ( *_lowercase ,**_lowercase ): """simple docstring""" UpperCamelCase = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_lowercase ,**_lowercase ) as tmp_dir: try: os.chdir(_lowercase ) yield finally: os.chdir(_lowercase ) @contextmanager def __snake_case ( ): """simple docstring""" import gc gc.collect() UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __snake_case ( ): """simple docstring""" import gc gc.collect() UpperCamelCase = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." def __snake_case ( _lowercase ,_lowercase ): """simple docstring""" return deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() == deepcopy(_lowercase ).integers(0 ,100 ,10 ).tolist() def __snake_case ( _lowercase ): """simple docstring""" import decorator from requests.exceptions import HTTPError def _wrapper(_lowercase ,*_lowercase ,**_lowercase ): try: return func(*_lowercase ,**_lowercase ) except HTTPError as err: if str(_lowercase ).startswith('''500''' ) or str(_lowercase ).startswith('''502''' ): pytest.xfail(str(_lowercase ) ) raise err return decorator.decorator(_wrapper ,_lowercase ) class snake_case_ : """simple docstring""" def __init__( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) -> Dict: UpperCamelCase = returncode UpperCamelCase = stdout UpperCamelCase = stderr async def __snake_case ( _lowercase ,_lowercase ): """simple docstring""" while True: UpperCamelCase = await stream.readline() if line: callback(_lowercase ) else: break async def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=None ,_lowercase=False ,_lowercase=False ): """simple docstring""" if echo: print('''\nRunning: ''' ,''' '''.join(_lowercase ) ) UpperCamelCase = await asyncio.create_subprocess_exec( cmd[0] ,*cmd[1:] ,stdin=_lowercase ,stdout=asyncio.subprocess.PIPE ,stderr=asyncio.subprocess.PIPE ,env=_lowercase ,) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) UpperCamelCase = [] UpperCamelCase = [] def tee(_lowercase ,_lowercase ,_lowercase ,_lowercase="" ): UpperCamelCase = line.decode('''utf-8''' ).rstrip() sink.append(_lowercase ) if not quiet: print(_lowercase ,_lowercase ,file=_lowercase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stdout ,label='''stdout:''' ) ), _read_stream(p.stderr ,lambda _lowercase : tee(_lowercase ,_lowercase ,sys.stderr ,label='''stderr:''' ) ), ] ,timeout=_lowercase ,) return _RunOutput(await p.wait() ,_lowercase ,_lowercase ) def __snake_case ( _lowercase ,_lowercase=None ,_lowercase=None ,_lowercase=180 ,_lowercase=False ,_lowercase=True ): """simple docstring""" UpperCamelCase = asyncio.get_event_loop() UpperCamelCase = loop.run_until_complete( _stream_subprocess(_lowercase ,env=_lowercase ,stdin=_lowercase ,timeout=_lowercase ,quiet=_lowercase ,echo=_lowercase ) ) UpperCamelCase = ''' '''.join(_lowercase ) if result.returncode > 0: UpperCamelCase = '''\n'''.join(result.stderr ) raise RuntimeError( f'\'{cmd_str}\' failed with returncode {result.returncode}\n\n' f'The combined stderr from workers follows:\n{stderr}' ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'\'{cmd_str}\' produced no output.' ) return result def __snake_case ( ): """simple docstring""" UpperCamelCase = os.environ.get('''PYTEST_XDIST_WORKER''' ,'''gw0''' ) UpperCamelCase = re.sub(r'''^gw''' ,'''''' ,_lowercase ,0 ,re.M ) return int(_lowercase ) def __snake_case ( ): """simple docstring""" UpperCamelCase = 2_9500 UpperCamelCase = pytest_xdist_worker_id() return port + uniq_delta
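# Usage sketch for the gating decorators above; the dump renamed them all to __snake_case,
# but the skip messages imply the original names (require_faiss, slow, and so on).
import unittest

class ExampleTest(unittest.TestCase):
    @require_faiss  # auto-skips when faiss is not importable
    def test_faiss_index(self):
        import faiss  # noqa: F401

    @slow  # runs only when RUN_SLOW=yes is exported
    def test_expensive_path(self):
        pass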
34
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __magic_name__ = {'configuration_yolos': ['YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP', 'YolosConfig', 'YolosOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = ['YolosFeatureExtractor'] __magic_name__ = ['YolosImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ = [ 'YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST', 'YolosForObjectDetection', 'YolosModel', 'YolosPreTrainedModel', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys __magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a_ :str = get_tests_dir('fixtures') class lowercase ( unittest.TestCase ): def lowercase__ ( self : Union[str, Any] ): # A mock response for an HTTP head request to emulate server down SCREAMING_SNAKE_CASE__ : Tuple = mock.Mock() SCREAMING_SNAKE_CASE__ : Dict = 5_00 SCREAMING_SNAKE_CASE__ : Optional[int] = {} SCREAMING_SNAKE_CASE__ : List[str] = HTTPError SCREAMING_SNAKE_CASE__ : Optional[Any] = {} # Download this model to make sure it's in the cache. SCREAMING_SNAKE_CASE__ : Optional[Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=_lowercase ) as mock_head: SCREAMING_SNAKE_CASE__ : Union[str, Any] = ViTImageProcessor.from_pretrained('''hf-internal-testing/tiny-random-vit''' ) # This check we did call the fake head request mock_head.assert_called() def lowercase__ ( self : Tuple ): # This test is for deprecated behavior and can be removed in v5 SCREAMING_SNAKE_CASE__ : str = ViTImageProcessor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json''' ) def lowercase__ ( self : Tuple ): with self.assertRaises(_lowercase ): # config is in subfolder, the following should not work without specifying the subfolder SCREAMING_SNAKE_CASE__ : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/stable-diffusion-all-variants''' ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained( '''hf-internal-testing/stable-diffusion-all-variants''' , subfolder='''feature_extractor''' ) self.assertIsNotNone(_lowercase ) @is_staging_test class lowercase ( unittest.TestCase ): @classmethod def lowercase__ ( cls : int ): SCREAMING_SNAKE_CASE__ : int = TOKEN HfFolder.save_token(_lowercase ) @classmethod def lowercase__ ( cls : Union[str, Any] ): try: delete_repo(token=cls._token , repo_id='''test-image-processor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-image-processor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-image-processor''' ) except HTTPError: pass def lowercase__ ( self : Optional[Any] ): SCREAMING_SNAKE_CASE__ : Dict = ViTImageProcessor.from_pretrained(_lowercase ) image_processor.push_to_hub('''test-image-processor''' , use_auth_token=self._token ) SCREAMING_SNAKE_CASE__ : Tuple = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowercase , repo_id='''test-image-processor''' , push_to_hub=_lowercase , use_auth_token=self._token ) SCREAMING_SNAKE_CASE__ : Any = ViTImageProcessor.from_pretrained(f"""{USER}/test-image-processor""" ) for k, v in 
image_processor.__dict__.items(): self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) ) def lowercase__ ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE__ : Optional[int] = ViTImageProcessor.from_pretrained(_lowercase ) image_processor.push_to_hub('''valid_org/test-image-processor''' , use_auth_token=self._token ) SCREAMING_SNAKE_CASE__ : str = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-image-processor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( _lowercase , repo_id='''valid_org/test-image-processor-org''' , push_to_hub=_lowercase , use_auth_token=self._token ) SCREAMING_SNAKE_CASE__ : str = ViTImageProcessor.from_pretrained('''valid_org/test-image-processor-org''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(_lowercase , getattr(_lowercase , _lowercase ) ) def lowercase__ ( self : Optional[Any] ): CustomImageProcessor.register_for_auto_class() SCREAMING_SNAKE_CASE__ : Any = CustomImageProcessor.from_pretrained(_lowercase ) image_processor.push_to_hub('''test-dynamic-image-processor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {'''AutoImageProcessor''': '''custom_image_processing.CustomImageProcessor'''} , ) SCREAMING_SNAKE_CASE__ : List[str] = AutoImageProcessor.from_pretrained( f"""{USER}/test-dynamic-image-processor""" , trust_remote_code=_lowercase ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , '''CustomImageProcessor''' )
35
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# The obfuscated cell assigned every structure to the same name and then referenced an
# undefined `_import_structure`; the lazy-import dict is restored so the module loads.
_import_structure = {
    'configuration_deberta': ['DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DebertaConfig', 'DebertaOnnxConfig'],
    'tokenization_deberta': ['DebertaTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_deberta_fast'] = ['DebertaTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_deberta'] = [
        'DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'DebertaForMaskedLM',
        'DebertaForQuestionAnswering',
        'DebertaForSequenceClassification',
        'DebertaForTokenClassification',
        'DebertaModel',
        'DebertaPreTrainedModel',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_deberta'] = [
        'TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFDebertaForMaskedLM',
        'TFDebertaForQuestionAnswering',
        'TFDebertaForSequenceClassification',
        'TFDebertaForTokenClassification',
        'TFDebertaModel',
        'TFDebertaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
665
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}


# The obfuscation gave every constructor argument the same name (a syntax error) and
# made the class inherit from an undefined symbol; the real parameter names are
# recovered from the attribute assignments in the body.
class GPTNeoXJapaneseConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'gpt_neox_japanese'

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
36
'''simple docstring'''


def topological_sort(graph: dict) -> None:
    """Topological sort (Kahn's algorithm) of a DAG given as an adjacency list."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    # Count the incoming edges of every vertex.
    for values in graph.values():
        for i in values:
            indegree[i] += 1

    # Seed the queue with the vertices that have no incoming edges.
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
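A quick sanity check for the repaired sort above — a hedged sketch, not part of the original dataset row; the three-node DAG literal is a made-up example:

# A chain 0 -> 1 -> 2 has exactly one topological order, so the call prints [0, 1, 2].
topological_sort({0: [1], 1: [2], 2: []})  # prints: [0, 1, 2]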
665
0
from math import factorial, pi


# Both functions were obfuscated to the same name, so the calls below would raise
# NameError; the names are restored from the call sites in the __main__ block.
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Wrap theta into a single period so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
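A small verification sketch for the series above (illustrative only, not part of the original row): with the default 30 terms the truncated Maclaurin series agrees with the exact values far beyond float precision once theta is wrapped into one period.

from math import isclose, pi

assert isclose(maclaurin_sin(pi / 2), 1.0, abs_tol=1e-9)  # sin(pi/2) = 1
assert isclose(maclaurin_cos(pi), -1.0, abs_tol=1e-9)     # cos(pi) = -1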
37
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,): '''simple docstring''' A_ : List[str] = parent A_ : Any = batch_size A_ : Tuple = seq_length A_ : List[str] = is_training A_ : Tuple = use_input_mask A_ : Dict = use_token_type_ids A_ : List[Any] = use_labels A_ : Union[str, Any] = vocab_size A_ : Any = hidden_size A_ : str = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : str = intermediate_size A_ : Tuple = hidden_act A_ : Any = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : List[str] = max_position_embeddings A_ : int = type_vocab_size A_ : Union[str, Any] = type_sequence_label_size A_ : Any = initializer_range A_ : List[Any] = num_labels A_ : Optional[Any] = num_choices A_ : List[Any] = scope def _a ( self : Optional[int] ): '''simple docstring''' A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : int = None if self.use_input_mask: A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Dict = None if self.use_token_type_ids: A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : str = None A_ : Any = None A_ : str = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) A_ : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Optional[Any] ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,) def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ): '''simple docstring''' A_ : Any = LlamaModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[Any] = model(_a ,attention_mask=_a ) A_ : Optional[int] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def 
_a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,): '''simple docstring''' A_ : List[str] = True A_ : Union[str, Any] = LlamaModel(_a ) model.to(_a ) model.eval() A_ : Tuple = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,) A_ : List[Any] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,) A_ : int = model(_a ,attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,): '''simple docstring''' A_ : List[Any] = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() A_ : Dict = model(_a ,attention_mask=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,): '''simple docstring''' A_ : Optional[Any] = True A_ : Any = True A_ : Tuple = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass A_ : Optional[int] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,) A_ : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 ) A_ : List[str] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0] A_ : Any = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0] # select random slice A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() A_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Any = config_and_inputs A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () a_ = (LlamaForCausalLM,) if is_torch_available() else () a_ = ( { """feature-extraction""": LlamaModel, """text-classification""": LlamaForSequenceClassification, """text-generation""": LlamaForCausalLM, """zero-shot""": LlamaForSequenceClassification, } if is_torch_available() else {} ) a_ = False a_ = False def _a ( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = LlamaModelTester(self ) A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 ) 
def _a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : Dict = type self.model_tester.create_and_check_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[str] = 3 A_ : Any = input_dict["""input_ids"""] A_ : Union[str, Any] = input_ids.ne(1 ).to(_a ) A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A_ : List[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : int = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Dict ): '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : str = 3 A_ : Union[str, Any] = """single_label_classification""" A_ : Union[str, Any] = input_dict["""input_ids"""] A_ : List[Any] = input_ids.ne(1 ).to(_a ) A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A_ : List[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = 3 A_ : Dict = """multi_label_classification""" A_ : Any = input_dict["""input_ids"""] A_ : Optional[Any] = input_ids.ne(1 ).to(_a ) A_ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A_ : Optional[int] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def _a ( self : Any ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _a ( self : Optional[Any] ,_a : List[Any] ): '''simple docstring''' A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size ) A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A_ : int = LlamaModel(_a ) original_model.to(_a ) original_model.eval() A_ : Tuple = original_model(_a ).last_hidden_state A_ : Union[str, Any] = original_model(_a ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0} A_ : int = LlamaModel(_a ) scaled_model.to(_a ) scaled_model.eval() A_ : List[Any] = scaled_model(_a ).last_hidden_state A_ : Any = scaled_model(_a ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # 
maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" ) A_ : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : str ): '''simple docstring''' A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" ) A_ : Tuple = model(torch.tensor(_a ) ) # Expected mean on dim = -1 A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" ) A_ : int = model(torch.tensor(_a ) ) # Expected mean on dim = -1 A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test""" ) @slow def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" ) A_ : Tuple = model(torch.tensor(_a ) ) A_ : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Model is curently gated""" ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" A_ : List[str] = """Simply put, the theory of relativity states that """ A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" ) A_ : List[str] = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a ) # greedy generation outputs A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a ) A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a ) self.assertEqual(_a ,_a )
665
0
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


# Every constructor argument was obfuscated to the same identifier (a syntax error);
# the parameter names are recovered from the attribute assignments in the body.
class MraConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'mra'

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
38
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets __magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' __magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' __magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def _a ( self : Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ), """references""": datasets.Value("""string""" ), } ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,) def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = 0.0 for i, j in zip(_a ,_a ): n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0 A_ : List[str] = n_correct / len(_a ) return { "accuracy": accuracy, }
665
0
import unittest from transformers import SPIECE_UNDERLINE from transformers.models.speechta import SpeechTaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.tokenization_utils import AddedToken from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_bpe_char.model''') @require_sentencepiece @require_tokenizers class snake_case_ ( __A , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = SpeechTaTokenizer SCREAMING_SNAKE_CASE : Tuple = False SCREAMING_SNAKE_CASE : List[Any] = True def snake_case__( self : int ) ->List[Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = SpeechTaTokenizer(_UpperCamelCase ) snake_case_ = AddedToken('''<mask>''' , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) snake_case_ = mask_token tokenizer.add_special_tokens({'''mask_token''': mask_token} ) tokenizer.add_tokens(['''<ctc_blank>'''] ) tokenizer.save_pretrained(self.tmpdirname ) def snake_case__( self : List[Any] , _UpperCamelCase : List[Any] ) ->Tuple: snake_case_ = '''this is a test''' snake_case_ = '''this is a test''' return input_text, output_text def snake_case__( self : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int]=False , _UpperCamelCase : Tuple=2_0 , _UpperCamelCase : Dict=5 ) ->Optional[Any]: snake_case_, snake_case_ = self.get_input_output_texts(_UpperCamelCase ) snake_case_ = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) snake_case_ = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) return text, ids def snake_case__( self : str ) ->Union[str, Any]: snake_case_ = '''<pad>''' snake_case_ = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCamelCase ) , _UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCamelCase ) , _UpperCamelCase ) def snake_case__( self : Dict ) ->Union[str, Any]: snake_case_ = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<s>''' ) self.assertEqual(vocab_keys[1] , '''<pad>''' ) self.assertEqual(vocab_keys[-4] , '''œ''' ) self.assertEqual(vocab_keys[-2] , '''<mask>''' ) self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' ) self.assertEqual(len(_UpperCamelCase ) , 8_1 ) def snake_case__( self : Tuple ) ->Union[str, Any]: self.assertEqual(self.get_tokenizer().vocab_size , 7_9 ) def snake_case__( self : int ) ->Optional[int]: snake_case_ = self.get_tokenizers(do_lower_case=_UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): snake_case_ = tokenizer.vocab_size snake_case_ = len(_UpperCamelCase ) self.assertNotEqual(_UpperCamelCase , 0 ) # We usually have added tokens from the start in tests because our vocab fixtures are # smaller than the original vocabs - let's not assert this # self.assertEqual(vocab_size, all_size) snake_case_ = ['''aaaaa bbbbbb''', '''cccccccccdddddddd'''] snake_case_ = tokenizer.add_tokens(_UpperCamelCase ) snake_case_ = tokenizer.vocab_size snake_case_ = len(_UpperCamelCase ) self.assertNotEqual(_UpperCamelCase , 0 ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , len(_UpperCamelCase ) ) self.assertEqual(_UpperCamelCase , all_size + len(_UpperCamelCase ) ) snake_case_ = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_UpperCamelCase ) 
self.assertGreaterEqual(len(_UpperCamelCase ) , 4 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) snake_case_ = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''} snake_case_ = tokenizer.add_special_tokens(_UpperCamelCase ) snake_case_ = tokenizer.vocab_size snake_case_ = len(_UpperCamelCase ) self.assertNotEqual(_UpperCamelCase , 0 ) self.assertEqual(_UpperCamelCase , _UpperCamelCase ) self.assertEqual(_UpperCamelCase , len(_UpperCamelCase ) ) self.assertEqual(_UpperCamelCase , all_size_a + len(_UpperCamelCase ) ) snake_case_ = tokenizer.encode( '''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_UpperCamelCase ) self.assertGreaterEqual(len(_UpperCamelCase ) , 6 ) self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[0] , tokens[1] ) self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 ) self.assertGreater(tokens[-3] , tokens[-4] ) self.assertEqual(tokens[0] , tokenizer.eos_token_id ) self.assertEqual(tokens[-3] , tokenizer.pad_token_id ) def snake_case__( self : Dict ) ->Optional[int]: pass def snake_case__( self : int ) ->List[Any]: pass def snake_case__( self : str ) ->List[Any]: snake_case_ = self.get_tokenizer() snake_case_ = tokenizer.tokenize('''This is a test''' ) # fmt: off self.assertListEqual(_UpperCamelCase , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] ) # fmt: on self.assertListEqual( tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , ) snake_case_ = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( _UpperCamelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) snake_case_ = tokenizer.convert_tokens_to_ids(_UpperCamelCase ) # fmt: off self.assertListEqual(_UpperCamelCase , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6] ) # fmt: on snake_case_ = tokenizer.convert_ids_to_tokens(_UpperCamelCase ) self.assertListEqual( _UpperCamelCase , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] ) @slow def snake_case__( self : Tuple ) ->Dict: # Use custom sequence because this tokenizer does not handle numbers. snake_case_ = [ '''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides ''' '''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) 
for Natural ''' '''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained ''' '''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''', '''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly ''' '''conditioning on both left and right context in all layers.''', '''The quick brown fox jumps over the lazy dog.''', ] # fmt: off snake_case_ = { '''input_ids''': [ [4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2], [4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], ], '''attention_mask''': [ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] } # fmt: on self.tokenizer_integration_test_util( expected_encoding=_UpperCamelCase , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_UpperCamelCase , )
39
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'yjernite/retribert-base-uncased': (
        'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
    ),
}


# Duplicate obfuscated parameter names made the constructor a syntax error; the real
# names are recovered from the attribute assignments in the body.
class RetriBertConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'retribert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
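A minimal usage sketch for the repaired config class above (illustrative, not part of the original row; it assumes the restored `RetriBertConfig` name):

config = RetriBertConfig(projection_dim=256)
assert config.hidden_size == 768 and config.projection_dim == 256  # defaults kept, override applied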
665
0
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __UpperCAmelCase = logging.get_logger(__name__) __UpperCAmelCase = torch.device('''cpu''') def UpperCamelCase ( ) -> Tuple: UpperCamelCase : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' UpperCamelCase : Tuple = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ) return im def UpperCamelCase ( snake_case__ : str ) -> int: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703E00, 2.1_107E00, -2.0_811E00, 8.8_685E-01, 2.4_360E-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636E-01, 2.3_478E-01, -1.6_963E00, -1.7_381E00, -8.6_337E-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768E-01, -4.7_429E-01, -1.0_897E00, -1.0_248E00, 3.5_523E-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330E-01, 2.4_211E-01, -6.0_185E-01, -8.2_789E-01, -6.0_446E-02] ) def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[Any] ) -> str: UpperCamelCase : Optional[int] = dct.pop(snake_case__ ) UpperCamelCase : int = val def UpperCamelCase ( snake_case__ : Optional[int] ) -> Optional[Any]: UpperCamelCase : List[str] = [] for k in state_dict.keys(): UpperCamelCase : Optional[Any] = k if ".pwconv" in k: UpperCamelCase : Dict = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: UpperCamelCase : Optional[Any] = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: UpperCamelCase : Optional[Any] = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: UpperCamelCase : Optional[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: UpperCamelCase : Dict = k_new.split('.' ) if ls[2].isdigit(): UpperCamelCase : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: UpperCamelCase : int = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : str ) -> List[Any]: UpperCamelCase : Dict = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCamelCase : str = 1000 UpperCamelCase : List[Any] = 'huggingface/label-files' UpperCamelCase : str = 'imagenet-1k-id2label.json' UpperCamelCase : List[Any] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) ) UpperCamelCase : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()} UpperCamelCase : List[Any] = idalabel UpperCamelCase : Dict = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCamelCase : Any = [3, 3, 6, 4] UpperCamelCase : Optional[Any] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": UpperCamelCase : Optional[Any] = [3, 3, 9, 6] UpperCamelCase : str = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": UpperCamelCase : Optional[Any] = [4, 3, 10, 5] UpperCamelCase : Union[str, Any] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": UpperCamelCase : List[Any] = [4, 4, 12, 6] UpperCamelCase : List[str] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): UpperCamelCase : Any = torch.hub.load_state_dict_from_url(snake_case__ , map_location='cpu' , check_hash=snake_case__ ) else: UpperCamelCase : Optional[Any] = torch.load(snake_case__ , map_location='cpu' ) UpperCamelCase : int = checkpoint UpperCamelCase : str = create_rename_keys(snake_case__ ) for rename_key_src, rename_key_dest in rename_keys: rename_key(snake_case__ , snake_case__ , snake_case__ ) # load HuggingFace model UpperCamelCase : str = SwiftFormerForImageClassification(snake_case__ ).eval() hf_model.load_state_dict(snake_case__ ) # prepare test inputs UpperCamelCase : Union[str, Any] = prepare_img() UpperCamelCase : Optional[int] = ViTImageProcessor.from_pretrained('preprocessor_config' ) UpperCamelCase : int = processor(images=snake_case__ , return_tensors='pt' ) # compare outputs from both models UpperCamelCase : Tuple = get_expected_output(snake_case__ ) UpperCamelCase : Optional[Any] = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , snake_case__ , atol=1E-3 ) Path(snake_case__ ).mkdir(exist_ok=snake_case__ ) print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" ) hf_model.save_pretrained(snake_case__ ) if __name__ == "__main__": __UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--swiftformer_name''', default='''swiftformer_xs''', choices=['''swiftformer_xs''', '''swiftformer_s''', '''swiftformer_l1''', '''swiftformer_l3'''], type=str, help='''Name of the SwiftFormer model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default='''./converted_outputs/''', type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument('''--original_ckpt''', default=None, type=str, help='''Path to the original model checkpoint.''') __UpperCAmelCase = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
40
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'spiece.model'} __magic_name__ = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } __magic_name__ = { 'google/bigbird-roberta-base': 4_096, 'google/bigbird-roberta-large': 4_096, 'google/bigbird-base-trivia-itc': 4_096, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = [] def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,): '''simple docstring''' A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,) A_ : Optional[int] = vocab_file A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) @property def _a ( self : Union[str, Any] ): '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = self.__dict__.copy() A_ : Union[str, Any] = None return state def __setstate__( self : List[Any] ,_a : Any ): '''simple docstring''' A_ : Tuple = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): A_ : Tuple = {} A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self : Union[str, Any] ,_a : str ): '''simple docstring''' return self.sp_model.encode(_a ,out_type=_a ) def _a ( self : Optional[int] ,_a : str ): '''simple docstring''' return self.sp_model.piece_to_id(_a ) def _a ( self : int ,_a : Optional[int] ): '''simple docstring''' A_ : List[str] = self.sp_model.IdToPiece(_a ) return token def _a ( self : Dict ,_a : int ): '''simple docstring''' A_ : int = [] A_ : Any = """""" A_ : str = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token A_ : Dict = True A_ : Union[str, Any] = [] else: current_sub_tokens.append(_a ) A_ : str = False out_string += self.sp_model.decode(_a ) return out_string.strip() def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,): '''simple docstring''' A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a ) A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A_ : str = [] A_ : int = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) A_ : List[str] = [] sub_texts.append(_a ) else: current_sub_text.append(_a ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) ) else: A_ : Tuple = """""".join(_a ) A_ : str = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A_ : Optional[Any] = self.clean_up_tokenization(_a ) return clean_text else: return text def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : int = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_a ) elif not os.path.isfile(self.vocab_file ): with open(_a ,"""wb""" ) as fi: A_ : str = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : List[Any] = [self.cls_token_id] A_ : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Tuple = [self.sep_token_id] A_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
665
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ..utils import _LazyModule


# The import-structure dict was obfuscated to a different name than the one passed to
# _LazyModule below, which would raise NameError at import time; both now agree.
_import_structure = {
    'config': [
        'EXTERNAL_DATA_FORMAT_SIZE_LIMIT',
        'OnnxConfig',
        'OnnxConfigWithPast',
        'OnnxSeq2SeqConfigWithPast',
        'PatchingSpec',
    ],
    'convert': ['export', 'validate_model_outputs'],
    'features': ['FeaturesManager'],
    'utils': ['ParameterFormat', 'compute_serialized_parameters_size'],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
41
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ): '''simple docstring''' A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a ) return generator, ["Something to write", "Something else"] def _a ( self : str ,_a : Union[str, Any] ,_a : int ): '''simple docstring''' A_ : Any = generator("""Something there""" ) self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) A_ : List[str] = generator( ["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) with self.assertRaises(_a ): generator(4 ) @require_torch def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" ) # do_sample=False necessary for reproducibility A_ : Tuple = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] ) A_ : Optional[int] = 3 A_ : Tuple = generator( """Something there""" ,num_return_sequences=_a ,num_beams=_a ,) A_ : Optional[Any] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a ,_a ) A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a ) self.assertEqual( _a ,[ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] ,) A_ : Dict = generator.model.config.eos_token_id A_ : Optional[int] = """<pad>""" A_ : List[Any] = generator( ["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,) self.assertEqual( _a ,[ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] ,) @require_tf def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" ) # do_sample=False necessary for reproducibility A_ : Dict = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] )
665
0
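The `_LazyModule` row above defers submodule imports until first attribute access. A minimal self-contained sketch of the same idea, assuming hypothetical submodule names; this is illustrative, not the transformers implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    """Minimal lazy module: resolves exported symbols to submodules on first access."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        submodule = self._symbol_to_module.get(symbol)
        if submodule is None:
            raise AttributeError(symbol)
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, symbol)
        setattr(self, symbol, value)  # cache so later lookups skip __getattr__
        return value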
'''simple docstring'''
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Real power P = S * pf for apparent power S and power factor pf."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Reactive power Q = S * sqrt(1 - pf**2)."""
    if not isinstance(power_factor, (int, float)) or power_factor < -1 or power_factor > 1:
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
42
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}


class GPTBigCodeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50257,
        n_positions=1024,
        n_embd=768,
        n_layer=12,
        n_head=12,
        n_inner=None,
        activation_function="gelu_pytorch_tanh",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        attention_softmax_in_fp32=True,
        scale_attention_softmax_in_fp32=True,
        multi_query=True,
        **kwargs,
    ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
665
0
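A quick numeric check for the two power functions above, with illustrative values: for apparent power S = 100 VA and power factor 0.8, real power is S * pf = 80 W and reactive power is S * sqrt(1 - pf**2) = 60 var.

import math

apparent_power = 100.0  # VA, illustrative value
power_factor = 0.8

real = apparent_power * power_factor                         # 80.0 W
reactive = apparent_power * math.sqrt(1 - power_factor**2)   # 60.0 var

assert math.isclose(real, 80.0)
assert math.isclose(reactive, 60.0)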
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Burrows-Wheeler transform: last column of the sorted rotations, plus the index of the original string."""
    if not isinstance(s, str):
        raise TypeError('The parameter s type must be str.')
    if not s:
        raise ValueError('The parameter s must not be empty.')

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Invert the transform by repeatedly prepending the BWT column and re-sorting."""
    if not isinstance(bwt_string, str):
        raise TypeError('The parameter bwt_string type must be str.')
    if not bwt_string:
        raise ValueError('The parameter bwt_string must not be empty.')
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            'The parameter idx_original_string type must be int or possible to cast to int.'
        )

    if idx_original_string < 0:
        raise ValueError('The parameter idx_original_string must not be lower than 0.')
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            'The parameter idx_original_string must be lower than len(bwt_string).'
        )

    ordered_rotations = [''] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    entry_msg = 'Provide a string that I will generate its BWT transform: '
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"""Burrows Wheeler transform for string '{s}' results """
        f"""in '{result["bwt_string"]}'"""
    )
    original_string = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        f"""Reversing Burrows Wheeler transform for entry '{result["bwt_string"]}' """
        f"""we get original string '{original_string}'"""
    )
43
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} __magic_name__ = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } __magic_name__ = { 'allenai/longformer-base-4096': 4_096, 'allenai/longformer-large-4096': 4_096, 'allenai/longformer-large-4096-finetuned-triviaqa': 4_096, 'allenai/longformer-base-4096-extra.pos.embd.only': 4_096, 'allenai/longformer-large-4096-extra.pos.embd.only': 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCamelCase ( ): A_ : Union[str, Any] = ( list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1)) ) A_ : Optional[Any] = bs[:] A_ : List[str] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase) cs.append(2**8 + n) n += 1 A_ : List[Any] = [chr(lowerCamelCase) for n in cs] return dict(zip(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int): A_ : int = set() A_ : int = word[0] for char in word[1:]: pairs.add((prev_char, char)) A_ : List[str] = char return pairs class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,): '''simple docstring''' A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if 
isinstance(_a ,_a ) else sep_token A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token super().__init__( errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,) with open(_a ,encoding="""utf-8""" ) as vocab_handle: A_ : str = json.load(_a ) A_ : Optional[int] = {v: k for k, v in self.encoder.items()} A_ : List[str] = errors # how to handle errors in decoding A_ : List[str] = bytes_to_unicode() A_ : str = {v: k for k, v in self.byte_encoder.items()} with open(_a ,encoding="""utf-8""" ) as merges_handle: A_ : Any = merges_handle.read().split("""\n""" )[1:-1] A_ : str = [tuple(merge.split() ) for merge in bpe_merges] A_ : int = dict(zip(_a ,range(len(_a ) ) ) ) A_ : List[Any] = {} A_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def _a ( self : Any ): '''simple docstring''' return len(self.encoder ) def _a ( self : str ): '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def _a ( self : int ,_a : int ): '''simple docstring''' if token in self.cache: return self.cache[token] A_ : Optional[int] = tuple(_a ) A_ : Any = get_pairs(_a ) if not pairs: return token while True: A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A_ , A_ : Dict = bigram A_ : int = [] A_ : Optional[Any] = 0 while i < len(_a ): try: A_ : List[str] = word.index(_a ,_a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A_ : Tuple = j if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A_ : str = tuple(_a ) A_ : str = new_word if len(_a ) == 1: break else: A_ : int = get_pairs(_a ) A_ : Optional[int] = """ """.join(_a ) A_ : List[str] = word return word def _a ( self : Dict ,_a : Optional[int] ): '''simple docstring''' A_ : Any = [] for token in re.findall(self.pat ,_a ): A_ : Any = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) ) return bpe_tokens def _a ( self : Union[str, Any] ,_a : Optional[int] ): '''simple docstring''' return self.encoder.get(_a ,self.encoder.get(self.unk_token ) ) def _a ( self : int ,_a : Dict ): '''simple docstring''' return self.decoder.get(_a ) def _a ( self : Optional[int] ,_a : List[Any] ): '''simple docstring''' A_ : Optional[int] = """""".join(_a ) A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : int = os.path.join( _a ,(filename_prefix + 
"""-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : int = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(_a ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" ) A_ : int = 0 with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) A_ : Dict = token_index writer.write(""" """.join(_a ) + """\n""" ) index += 1 return vocab_file, merge_file def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : int = [self.cls_token_id] A_ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1] def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Union[str, Any] = [self.sep_token_id] A_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ): '''simple docstring''' A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()): A_ : Optional[int] = """ """ + text return (text, kwargs)
665
0
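A round-trip check for the Burrows-Wheeler functions in the row above (function names as fixed there): for "banana" the six sorted rotations place the original at index 3, and the last column reads "nnbaaa".

result = bwt_transform("banana")
assert result["bwt_string"] == "nnbaaa"
assert result["idx_original_string"] == 3
assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "banana"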
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union

import numpy as np

from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase


def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad each sequence to sequence_length on the given side; pairs are padded as (len, 2) rows."""
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]
    return out_tensor.tolist()


def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False


@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Tensor conversion would fail here because the labels are not yet the same length.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}
        return batch
44
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
665
0
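Given the left/right padding behavior of padding_tensor as reconstructed above (the original slice targets were garbled, so the exact shapes are an inference from context), a quick check:

padded = padding_tensor([[1, 2], [3]], -100, "right", 3)
assert padded == [[1, 2, -100], [3, -100, -100]]

left = padding_tensor([[1, 2]], -100, "left", 3)
assert left == [[-100, 1, 2]]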
def stooge_sort(arr: list) -> list:
    """Sort arr in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the slice
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3, then the last 2/3, then the first 2/3 again
        stooge(arr, i, h - t)
        stooge(arr, i + t, h)
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
45
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart __magic_name__ = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } __magic_name__ = { 'facebook/bart-base': 1_024, 'facebook/bart-large': 1_024, 'facebook/bart-large-mnli': 1_024, 'facebook/bart-large-cnn': 1_024, 'facebook/bart-large-xsum': 1_024, 'yjernite/bart_eli5': 1_024, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = BartTokenizer def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,): '''simple docstring''' super().__init__( _a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,) A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space: A_ : 
List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**_a ) A_ : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A_ : str = """post_processor""" A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a ) if tokenizer_component_instance: A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : Tuple = tuple(state["""sep"""] ) if "cls" in state: A_ : Tuple = tuple(state["""cls"""] ) A_ : List[str] = False if state.get("""add_prefix_space""" ,_a ) != add_prefix_space: A_ : Dict = add_prefix_space A_ : Any = True if state.get("""trim_offsets""" ,_a ) != trim_offsets: A_ : Union[str, Any] = trim_offsets A_ : List[Any] = True if changes_to_apply: A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) ) A_ : Tuple = component_class(**_a ) setattr(self.backend_tokenizer ,_a ,_a ) @property def _a ( self : List[str] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _a ( self : Union[str, Any] ,_a : Any ): '''simple docstring''' A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value A_ : List[Any] = value def _a ( self : str ,*_a : str ,**_a : Optional[int] ): '''simple docstring''' A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*_a ,**_a ) def _a ( self : str ,*_a : List[Any] ,**_a : str ): '''simple docstring''' A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*_a ,**_a ) def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(_a ,name=_a ) return tuple(_a ) def _a ( self : str ,_a : Optional[int] ,_a : int=None ): '''simple docstring''' A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Dict = [self.sep_token_id] A_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
665
0
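A quick check of the stooge sort in the row above, on illustrative inputs (duplicates and negatives included):

print(stooge_sort([2, 4, 5, 3, 1]))  # [1, 2, 3, 4, 5]
print(stooge_sort([-5, 3, 0, 3]))    # [-5, 0, 3, 3]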
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version _lowerCAmelCase : Dict = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt''') @dataclass class A_ : lowerCAmelCase__ = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'The column name of the images in the files.'} ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'A folder containing the training data.'} ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'A folder containing the validation data.'} ) lowerCAmelCase__ = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) lowerCAmelCase__ = field( default=_a , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) lowerCAmelCase__ = field( default=_a , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def _lowercase ( self: Optional[Any] ): '''simple docstring''' _lowerCamelCase : Dict = {} if self.train_dir is not None: _lowerCamelCase : Optional[Any] = self.train_dir if self.validation_dir is not None: _lowerCamelCase : str = self.validation_dir _lowerCamelCase : Any = data_files if data_files else None @dataclass class A_ : lowerCAmelCase__ = field( default=_a , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} ) lowerCAmelCase__ = field( default=_a , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} ) lowerCAmelCase__ = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) lowerCAmelCase__ = field(default=_a , metadata={'help': 'Name or path of preprocessor config.'} ) lowerCAmelCase__ = field( default=_a , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' 
) } , ) lowerCAmelCase__ = field( default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} ) lowerCAmelCase__ = field( default=_a , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} ) @dataclass class A_ ( _a ): lowerCAmelCase__ = field( default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} ) def lowerCamelCase_( _lowerCamelCase ) -> Union[str, Any]: '''simple docstring''' _lowerCamelCase : int = torch.stack([example["pixel_values"] for example in examples] ) return {"pixel_values": pixel_values} def lowerCamelCase_( ) -> Any: '''simple docstring''' _lowerCamelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_mae" , _lowerCamelCase , _lowerCamelCase ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() _lowerCamelCase : Union[str, Any] = training_args.get_process_log_level() logger.setLevel(_lowerCamelCase ) transformers.utils.logging.set_verbosity(_lowerCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}""" + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Detecting last checkpoint. _lowerCamelCase : List[Any] = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _lowerCamelCase : Tuple = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Initialize our dataset. 
_lowerCamelCase : Optional[int] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. _lowerCamelCase : int = None if "validation" in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _lowerCamelCase ) and data_args.train_val_split > 0.0: _lowerCamelCase : Dict = ds["train"].train_test_split(data_args.train_val_split ) _lowerCamelCase : List[Any] = split["train"] _lowerCamelCase : List[str] = split["test"] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. _lowerCamelCase : Tuple = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: _lowerCamelCase : Dict = ViTMAEConfig.from_pretrained(model_args.config_name , **_lowerCamelCase ) elif model_args.model_name_or_path: _lowerCamelCase : Optional[Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_lowerCamelCase ) else: _lowerCamelCase : str = ViTMAEConfig() logger.warning("You are instantiating a new config instance from scratch." ) if model_args.config_overrides is not None: logger.info(F"""Overriding config: {model_args.config_overrides}""" ) config.update_from_string(model_args.config_overrides ) logger.info(F"""New config: {config}""" ) # adapt config config.update( { "mask_ratio": model_args.mask_ratio, "norm_pix_loss": model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: _lowerCamelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_lowerCamelCase ) elif model_args.model_name_or_path: _lowerCamelCase : Tuple = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_lowerCamelCase ) else: _lowerCamelCase : Optional[Any] = ViTImageProcessor() # create model if model_args.model_name_or_path: _lowerCamelCase : Dict = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info("Training new model from scratch" ) _lowerCamelCase : Optional[Any] = ViTMAEForPreTraining(_lowerCamelCase ) if training_args.do_train: _lowerCamelCase : int = ds["train"].column_names else: _lowerCamelCase : List[Any] = ds["validation"].column_names if data_args.image_column_name is not None: _lowerCamelCase : List[Any] = data_args.image_column_name elif "image" in column_names: _lowerCamelCase : int = "image" elif "img" in column_names: _lowerCamelCase : Tuple = "img" else: _lowerCamelCase : Tuple = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: _lowerCamelCase : Dict = image_processor.size["shortest_edge"] else: _lowerCamelCase : Optional[int] = (image_processor.size["height"], image_processor.size["width"]) _lowerCamelCase : Optional[Any] = Compose( [ Lambda(lambda _lowerCamelCase : img.convert("RGB" ) if img.mode != "RGB" else img ), RandomResizedCrop(_lowerCamelCase , 
scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(_lowerCamelCase ): _lowerCamelCase : Optional[Any] = [transforms(_lowerCamelCase ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError("--do_train requires a train dataset" ) if data_args.max_train_samples is not None: _lowerCamelCase : int = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_lowerCamelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError("--do_eval requires a validation dataset" ) if data_args.max_eval_samples is not None: _lowerCamelCase : Optional[int] = ( ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_lowerCamelCase ) # Compute absolute learning rate _lowerCamelCase : List[str] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: _lowerCamelCase : int = training_args.base_learning_rate * total_train_batch_size / 256 # Initialize our trainer _lowerCamelCase : Any = Trainer( model=_lowerCamelCase , args=_lowerCamelCase , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=_lowerCamelCase , data_collator=_lowerCamelCase , ) # Training if training_args.do_train: _lowerCamelCase : str = None if training_args.resume_from_checkpoint is not None: _lowerCamelCase : Optional[Any] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _lowerCamelCase : Union[str, Any] = last_checkpoint _lowerCamelCase : List[Any] = trainer.train(resume_from_checkpoint=_lowerCamelCase ) trainer.save_model() trainer.log_metrics("train" , train_result.metrics ) trainer.save_metrics("train" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _lowerCamelCase : Dict = trainer.evaluate() trainer.log_metrics("eval" , _lowerCamelCase ) trainer.save_metrics("eval" , _lowerCamelCase ) # Write model card and (optionally) push to hub _lowerCamelCase : int = { "tasks": "masked-auto-encoding", "dataset": data_args.dataset_name, "tags": ["masked-auto-encoding"], } if training_args.push_to_hub: trainer.push_to_hub(**_lowerCamelCase ) else: trainer.create_model_card(**_lowerCamelCase ) def lowerCamelCase_( _lowerCamelCase ) -> str: '''simple docstring''' main() if __name__ == "__main__": main()
46
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file A_ : int = TapasConfig.from_json_file(lowerCamelCase) # set absolute/relative position embeddings parameter A_ : List[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "WTQ": # run_task_main.py hparams A_ : Tuple = 4 A_ : Optional[Any] = True # hparam_utils.py hparams A_ : Any = 0.66_4694 A_ : str = 0.20_7951 A_ : Any = 0.12_1194 A_ : str = True A_ : Dict = True A_ : int = False A_ : int = 0.035_2513 A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams A_ : int = 4 A_ : Union[str, Any] = False # hparam_utils.py hparams A_ : Dict = 36.4519 A_ : List[Any] = 0.90_3421 A_ : Any = 222.088 A_ : Optional[Any] = True A_ : Optional[int] = True A_ : Optional[Any] = True A_ : Optional[int] = 0.76_3141 A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "TABFACT": A_ : Any = TapasForSequenceClassification(config=lowerCamelCase) elif task == "MLM": A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase) elif task == "INTERMEDIATE_PRETRAINING": A_ : Union[str, Any] = TapasModel(config=lowerCamelCase) else: raise ValueError(F'Task {task} not supported.') print(F'Building PyTorch model from configuration: {config}') # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Save pytorch-model (weights and configuration) print(F'Save PyTorch model to {pytorch_dump_path}') model.save_pretrained(lowerCamelCase) # Save tokenizer files print(F'Save tokenizer files to {pytorch_dump_path}') A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512) tokenizer.save_pretrained(lowerCamelCase) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.' ) parser.add_argument( '--reset_position_index_per_cell', default=False, action='store_true', help='Whether to use relative position embeddings or not. Defaults to True.', ) parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--tapas_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained TAPAS model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) __magic_name__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
665
0
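The MAE pre-training script in the row above stacks per-example pixel tensors in its collator. A minimal standalone sketch of that collate function, with illustrative tensor shapes:

import torch


def collate_fn(examples):
    # Stack per-example image tensors into one (batch, C, H, W) tensor.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    return {"pixel_values": pixel_values}


batch = collate_fn([{"pixel_values": torch.zeros(3, 4, 4)} for _ in range(2)])
assert batch["pixel_values"].shape == (2, 3, 4, 4)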
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) SCREAMING_SNAKE_CASE__ = logging.getLogger(__name__) def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : List[Any] ): __a : Any = np.argmax(lowerCamelCase_ , axis=1 ) return np.sum(outputs == labels ) def UpperCAmelCase__ ( lowerCamelCase_ : Tuple ): with open(lowerCamelCase_ , encoding='utf_8' ) as f: __a : Optional[Any] = csv.reader(lowerCamelCase_ ) __a : Optional[int] = [] next(lowerCamelCase_ ) # skip the first line for line in tqdm(lowerCamelCase_ ): output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def UpperCAmelCase__ ( lowerCamelCase_ : Any , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : List[Any] , lowerCamelCase_ : str , lowerCamelCase_ : Optional[Any] ): __a : int = [] for dataset in encoded_datasets: __a : List[Any] = len(lowerCamelCase_ ) __a : Optional[Any] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) __a : Optional[int] = np.zeros((n_batch, 2) , dtype=np.intaa ) __a : str = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa ) __a : List[Any] = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(lowerCamelCase_ ): __a : str = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __a : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __a : List[str] = with_conta __a : Optional[Any] = with_conta __a : Any = len(lowerCamelCase_ ) - 1 __a : Any = len(lowerCamelCase_ ) - 1 __a : Optional[int] = with_conta __a : Tuple = with_conta __a : Optional[int] = mc_label __a : Union[str, Any] = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(lowerCamelCase_ ) for t in all_inputs ) ) return tensor_datasets def UpperCAmelCase__ ( ): __a : List[str] = argparse.ArgumentParser() parser.add_argument('--model_name' , type=lowerCamelCase_ , default='openai-gpt' , help='pretrained model name' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' ) parser.add_argument( '--output_dir' , default=lowerCamelCase_ , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument('--train_dataset' , type=lowerCamelCase_ , default='' ) parser.add_argument('--eval_dataset' , type=lowerCamelCase_ , default='' ) parser.add_argument('--seed' , type=lowerCamelCase_ , default=4_2 ) parser.add_argument('--num_train_epochs' , type=lowerCamelCase_ , default=3 ) parser.add_argument('--train_batch_size' , type=lowerCamelCase_ , default=8 ) parser.add_argument('--eval_batch_size' , type=lowerCamelCase_ , default=1_6 ) parser.add_argument('--adam_epsilon' , default=1e-8 , type=lowerCamelCase_ , help='Epsilon for Adam optimizer.' 
) parser.add_argument('--max_grad_norm' , type=lowerCamelCase_ , default=1 ) parser.add_argument( '--max_steps' , default=-1 , type=lowerCamelCase_ , help=( 'If > 0: set total number of training steps to perform. Override num_train_epochs.' ) , ) parser.add_argument( '--gradient_accumulation_steps' , type=lowerCamelCase_ , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--learning_rate' , type=lowerCamelCase_ , default=6.25e-5 ) parser.add_argument('--warmup_steps' , default=0 , type=lowerCamelCase_ , help='Linear warmup over warmup_steps.' ) parser.add_argument('--lr_schedule' , type=lowerCamelCase_ , default='warmup_linear' ) parser.add_argument('--weight_decay' , type=lowerCamelCase_ , default=0.01 ) parser.add_argument('--lm_coef' , type=lowerCamelCase_ , default=0.9 ) parser.add_argument('--n_valid' , type=lowerCamelCase_ , default=3_7_4 ) parser.add_argument('--server_ip' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=lowerCamelCase_ , default='' , help='Can be used for distant debugging.' ) __a : int = parser.parse_args() print(lowerCamelCase_ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase_ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __a : Any = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) __a : Union[str, Any] = torch.cuda.device_count() logger.info('device: {}, n_gpu {}'.format(lowerCamelCase_ , lowerCamelCase_ ) ) if not args.do_train and not args.do_eval: raise ValueError('At least one of `do_train` or `do_eval` must be True.' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __a : int = ['_start_', '_delimiter_', '_classify_'] __a : Optional[int] = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(lowerCamelCase_ ) __a : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCamelCase_ ) __a : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(lowerCamelCase_ ) ) model.to(lowerCamelCase_ ) # Load and encode the datasets def tokenize_and_encode(lowerCamelCase_ : Optional[Any] ): if isinstance(lowerCamelCase_ , lowerCamelCase_ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCamelCase_ ) ) elif isinstance(lowerCamelCase_ , lowerCamelCase_ ): return obj return [tokenize_and_encode(lowerCamelCase_ ) for o in obj] logger.info('Encoding dataset...' 
) __a : Tuple = load_rocstories_dataset(args.train_dataset ) __a : List[Any] = load_rocstories_dataset(args.eval_dataset ) __a : str = (train_dataset, eval_dataset) __a : str = tokenize_and_encode(lowerCamelCase_ ) # Compute the max input length for the Transformer __a : Any = model.config.n_positions // 2 - 2 __a : Dict = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __a : Dict = min(lowerCamelCase_ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __a : Dict = pre_process_datasets(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , *lowerCamelCase_ ) __a , __a : str = tensor_datasets[0], tensor_datasets[1] __a : str = TensorDataset(*lowerCamelCase_ ) __a : int = RandomSampler(lowerCamelCase_ ) __a : str = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.train_batch_size ) __a : List[Any] = TensorDataset(*lowerCamelCase_ ) __a : str = SequentialSampler(lowerCamelCase_ ) __a : Dict = DataLoader(lowerCamelCase_ , sampler=lowerCamelCase_ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __a : Optional[Any] = args.max_steps __a : int = args.max_steps // (len(lowerCamelCase_ ) // args.gradient_accumulation_steps) + 1 else: __a : List[Any] = len(lowerCamelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs __a : Any = list(model.named_parameters() ) __a : Optional[int] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] __a : int = [ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] __a : List[str] = AdamW(lowerCamelCase_ , lr=args.learning_rate , eps=args.adam_epsilon ) __a : Optional[Any] = get_linear_schedule_with_warmup( lowerCamelCase_ , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCamelCase_ ) if args.do_train: __a , __a , __a : List[Any] = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ): __a : Union[str, Any] = 0 __a : int = 0 __a : str = tqdm(lowerCamelCase_ , desc='Training' ) for step, batch in enumerate(lowerCamelCase_ ): __a : int = tuple(t.to(lowerCamelCase_ ) for t in batch ) __a , __a , __a , __a : Any = batch __a : List[Any] = model(lowerCamelCase_ , mc_token_ids=lowerCamelCase_ , lm_labels=lowerCamelCase_ , mc_labels=lowerCamelCase_ ) __a : Union[str, Any] = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __a : Optional[int] = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __a : Optional[Any] = 'Training loss: {:.2e} lr: {:.2e}'.format(lowerCamelCase_ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __a : List[str] = model.module if hasattr(lowerCamelCase_ , 'module' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __a : Union[str, Any] = os.path.join(args.output_dir , lowerCamelCase_ ) __a : List[Any] = os.path.join(args.output_dir , lowerCamelCase_ ) torch.save(model_to_save.state_dict() , lowerCamelCase_ ) model_to_save.config.to_json_file(lowerCamelCase_ ) tokenizer.save_vocabulary(args.output_dir ) # 
Load a trained model and vocabulary that you have fine-tuned __a : str = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __a : Tuple = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(lowerCamelCase_ ) if args.do_eval: model.eval() __a , __a : Union[str, Any] = 0, 0 __a , __a : List[Any] = 0, 0 for batch in tqdm(lowerCamelCase_ , desc='Evaluating' ): __a : int = tuple(t.to(lowerCamelCase_ ) for t in batch ) __a , __a , __a , __a : Dict = batch with torch.no_grad(): __a , __a , __a , __a : Dict = model( lowerCamelCase_ , mc_token_ids=lowerCamelCase_ , lm_labels=lowerCamelCase_ , mc_labels=lowerCamelCase_ ) __a : int = mc_logits.detach().cpu().numpy() __a : List[str] = mc_labels.to('cpu' ).numpy() __a : str = accuracy(lowerCamelCase_ , lowerCamelCase_ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __a : Optional[int] = eval_loss / nb_eval_steps __a : Dict = eval_accuracy / nb_eval_examples __a : Dict = tr_loss / nb_tr_steps if args.do_train else None __a : Dict = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} __a : str = os.path.join(args.output_dir , 'eval_results.txt' ) with open(lowerCamelCase_ , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , lowerCamelCase_ , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) if __name__ == "__main__": main()
47
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""vqvae"""] def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,): '''simple docstring''' super().__init__() self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a ) def _a ( self : str ): '''simple docstring''' return 50 if isinstance(self.scheduler ,_a ) else 1000 @torch.no_grad() def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,): '''simple docstring''' A_ : List[str] = steps or self.get_default_steps() self.scheduler.set_timesteps(_a ) A_ : Union[str, Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: A_ : int = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=_a ,device=self.device ,) A_ : List[Any] = noise A_ : str = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_a ,_a ) A_ : Any = self.mel.audio_slice_to_image(_a ) A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) A_ : Optional[Any] = (input_image / 255) * 2 - 1 A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample( generator=_a )[0] A_ : List[str] = self.vqvae.config.scaling_factor * input_images if start_step > 0: A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] ) A_ : Tuple = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) A_ : Tuple = int(mask_start_secs * pixels_per_second ) A_ : str = int(mask_end_secs * pixels_per_second ) A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet ,_a ): A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""] else: A_ : List[Any] = self.unet(_a ,_a )["""sample"""] if isinstance(self.scheduler ,_a ): A_ : Dict = self.scheduler.step( model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""] else: A_ : Any = self.scheduler.step( model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""] if mask is not None: if mask_start > 0: A_ : Tuple = mask[:, step, :, :mask_start] if mask_end > 0: A_ : List[str] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance A_ : str = 1 / self.vqvae.config.scaling_factor * images 
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""] A_ : int = (images / 2 + 0.5).clamp(0 ,1 ) A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() A_ : Optional[int] = (images * 255).round().astype("""uint8""" ) A_ : List[Any] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) ) A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) ) @torch.no_grad() def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler ,_a ) self.scheduler.set_timesteps(_a ) A_ : Optional[Any] = np.array( [np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) A_ : List[str] = (sample / 255) * 2 - 1 A_ : Optional[int] = torch.Tensor(_a ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps A_ : Any = self.scheduler.alphas_cumprod[t] A_ : List[Any] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) A_ : str = 1 - alpha_prod_t A_ : List[str] = self.unet(_a ,_a )["""sample"""] A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ): '''simple docstring''' A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) ) return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
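The static method that closes the pipeline above is a spherical linear interpolation ("slerp") over flattened tensors; the renaming makes both interpolation endpoints read as the same identifier. A minimal sketch of the same formula with distinct endpoint names (x0/x1 are my labels, not names from the row above):

from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    # Angle between the two tensors, treated as flat vectors.
    theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
    # Standard slerp: weight each endpoint by the sine of its share of the angle.
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)


if __name__ == "__main__":
    a, b = torch.randn(4), torch.randn(4)
    print(slerp(a, b, 0.5).shape)  # torch.Size([4])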
665
0
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer UpperCAmelCase__ : Union[str, Any] = logging.get_logger(__name__) UpperCAmelCase__ : Union[str, Any] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} # See all BART models at https://huggingface.co/models?filter=bart UpperCAmelCase__ : Optional[int] = { "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, "tokenizer_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json", }, } UpperCAmelCase__ : int = { "facebook/bart-base": 10_24, "facebook/bart-large": 10_24, "facebook/bart-large-mnli": 10_24, "facebook/bart-large-cnn": 10_24, "facebook/bart-large-xsum": 10_24, "yjernite/bart_eli5": 10_24, } class A ( SCREAMING_SNAKE_CASE__ ): snake_case__ :Optional[int] = VOCAB_FILES_NAMES snake_case__ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP snake_case__ :int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ :List[Any] = ['input_ids', 'attention_mask'] snake_case__ :Union[str, Any] = BartTokenizer def __init__( self : List[str] , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Tuple=None , __magic_name__ : List[Any]="replace" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : Optional[int]="</s>" , __magic_name__ : Dict="</s>" , __magic_name__ : Union[str, Any]="<s>" , __magic_name__ : Optional[int]="<unk>" , __magic_name__ : str="<pad>" , __magic_name__ : Dict="<mask>" , __magic_name__ : Optional[int]=False , __magic_name__ : str=True , **__magic_name__ : Optional[Any] , ): """simple docstring""" 
super().__init__( __magic_name__ , __magic_name__ , tokenizer_file=__magic_name__ , errors=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , add_prefix_space=__magic_name__ , trim_offsets=__magic_name__ , **__magic_name__ , ) lowerCAmelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , __magic_name__ ) != add_prefix_space: lowerCAmelCase__ = getattr(__magic_name__ , pre_tok_state.pop("type" ) ) lowerCAmelCase__ = add_prefix_space lowerCAmelCase__ = pre_tok_class(**__magic_name__ ) lowerCAmelCase__ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` lowerCAmelCase__ = "post_processor" lowerCAmelCase__ = getattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) if tokenizer_component_instance: lowerCAmelCase__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowerCAmelCase__ = tuple(state["sep"] ) if "cls" in state: lowerCAmelCase__ = tuple(state["cls"] ) lowerCAmelCase__ = False if state.get("add_prefix_space" , __magic_name__ ) != add_prefix_space: lowerCAmelCase__ = add_prefix_space lowerCAmelCase__ = True if state.get("trim_offsets" , __magic_name__ ) != trim_offsets: lowerCAmelCase__ = trim_offsets lowerCAmelCase__ = True if changes_to_apply: lowerCAmelCase__ = getattr(__magic_name__ , state.pop("type" ) ) lowerCAmelCase__ = component_class(**__magic_name__ ) setattr(self.backend_tokenizer , __magic_name__ , __magic_name__ ) @property def __SCREAMING_SNAKE_CASE ( self : List[str] ): """simple docstring""" if self._mask_token is None: if self.verbose: logger.error("Using mask_token, but it is not set yet." ) return None return str(self._mask_token ) @mask_token.setter def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[Any] ): """simple docstring""" lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else value lowerCAmelCase__ = value def __SCREAMING_SNAKE_CASE ( self : str , *__magic_name__ : List[Any] , **__magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = kwargs.get("is_split_into_words" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__magic_name__ , **__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : str , *__magic_name__ : Any , **__magic_name__ : str ): """simple docstring""" lowerCAmelCase__ = kwargs.get("is_split_into_words" , __magic_name__ ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._encode_plus(*__magic_name__ , **__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str , __magic_name__ : Optional[str] = None ): """simple docstring""" lowerCAmelCase__ = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ ) return tuple(__magic_name__ ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Any , __magic_name__ : int=None ): """simple docstring""" lowerCAmelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ): """simple docstring""" lowerCAmelCase__ = [self.sep_token_id] lowerCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
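A hedged usage sketch for the fast BART tokenizer above (assumes the `transformers` package is installed and the hub is reachable); it exercises the special-token behavior that `build_inputs_with_special_tokens` encodes:

from transformers import BartTokenizerFast

tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
encoded = tokenizer("Hello world")
print(encoded["input_ids"][0] == tokenizer.bos_token_id)   # True: <s> is prepended
print(encoded["input_ids"][-1] == tokenizer.eos_token_id)  # True: </s> is appended
print(tokenizer.mask_token)                                # "<mask>"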
48
'''simple docstring''' import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __magic_name__ = 16 __magic_name__ = 32 def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16): A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""") A_ : str = load_dataset("""glue""" , """mrpc""") def tokenize_function(lowerCamelCase : Dict): # max_length=None => use the model max length (it's actually the default) A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Tuple = datasets.map( lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""") def collate_fn(lowerCamelCase : Tuple): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : List[Any] = 16 elif accelerator.mixed_precision != "no": A_ : Any = 8 else: A_ : Tuple = None return tokenizer.pad( lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
A_ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase) A_ : str = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , ) return train_dataloader, eval_dataloader def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict): # Initialize accelerator A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : List[Any] = config["""lr"""] A_ : List[Any] = int(config["""num_epochs"""]) A_ : int = int(config["""seed"""]) A_ : Dict = int(config["""batch_size"""]) A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""") # If the batch size is too big we use gradient accumulation A_ : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : Any = batch_size // MAX_GPU_BATCH_SIZE A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase) A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : str = model.to(accelerator.device) # Instantiate optimizer A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase) # Instantiate scheduler A_ : Tuple = get_linear_schedule_with_warmup( optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) # Now we train the model for epoch in range(lowerCamelCase): model.train() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) A_ : Optional[int] = model(**lowerCamelCase) A_ : List[Any] = outputs.loss A_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): A_ : Union[str, Any] = model(**lowerCamelCase) A_ : Any = outputs.logits.argmax(dim=-1) A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""])) metric.add_batch( predictions=lowerCamelCase , references=lowerCamelCase , ) A_ : int = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(F'epoch {epoch}:' , lowerCamelCase) def lowerCamelCase ( ): A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""") parser.add_argument( """--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""") A_ : Dict = parser.parse_args() A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase , lowerCamelCase) if __name__ == "__main__": main()
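The training script above follows the standard Accelerate pattern; a minimal self-contained sketch of that pattern with a toy model and random data (no GLUE download, all names here are illustrative):

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)
dataset = TensorDataset(torch.randn(32, 8), torch.randint(0, 2, (32,)))
loader = DataLoader(dataset, batch_size=8, shuffle=True)

# prepare() wraps everything for the current device / distributed setup.
model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
model.train()
for inputs, labels in loader:
    loss = torch.nn.functional.cross_entropy(model(inputs), labels)
    accelerator.backward(loss)  # used in place of loss.backward()
    optimizer.step()
    optimizer.zero_grad()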
665
0
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() _lowercase : Union[str, Any] = logging.get_logger(__name__) def lowercase__ ( snake_case_ :str ): __UpperCAmelCase = SwinConfig.from_pretrained( '''microsoft/swin-tiny-patch4-window7-224''' , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] ) __UpperCAmelCase = MaskFormerConfig(backbone_config=snake_case_ ) __UpperCAmelCase = '''huggingface/label-files''' if "ade20k-full" in model_name: # this should be ok __UpperCAmelCase = 847 __UpperCAmelCase = '''maskformer-ade20k-full-id2label.json''' elif "ade" in model_name: # this should be ok __UpperCAmelCase = 150 __UpperCAmelCase = '''ade20k-id2label.json''' elif "coco-stuff" in model_name: # this should be ok __UpperCAmelCase = 171 __UpperCAmelCase = '''maskformer-coco-stuff-id2label.json''' elif "coco" in model_name: # TODO __UpperCAmelCase = 133 __UpperCAmelCase = '''coco-panoptic-id2label.json''' elif "cityscapes" in model_name: # this should be ok __UpperCAmelCase = 19 __UpperCAmelCase = '''cityscapes-id2label.json''' elif "vistas" in model_name: # this should be ok __UpperCAmelCase = 65 __UpperCAmelCase = '''mapillary-vistas-id2label.json''' __UpperCAmelCase = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type='''dataset''' ) , '''r''' ) ) __UpperCAmelCase = {int(snake_case_ ): v for k, v in idalabel.items()} return config def lowercase__ ( snake_case_ :str ): __UpperCAmelCase = [] # stem # fmt: off rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') ) rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') ) rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') ) rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', 
F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.norm2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') ) rename_keys.append((F'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') ) if i < 3: rename_keys.append((F'''backbone.layers.{i}.downsample.reduction.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.weight''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') ) rename_keys.append((F'''backbone.layers.{i}.downsample.norm.bias''', F'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') ) rename_keys.append((F'''backbone.norm{i}.weight''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') ) rename_keys.append((F'''backbone.norm{i}.bias''', F'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') ) # FPN rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') ) rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F'''sem_seg_head.adapter_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') ) rename_keys.append((F'''sem_seg_head.adapter_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.weight''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') ) rename_keys.append((F'''sem_seg_head.layer_{source_index}.norm.bias''', F'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') ) rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') ) rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') ) # Transformer decoder for idx in 
range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') ) # cross-attention out projection rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') ) # MLP 1 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') ) # MLP 2 rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', F'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', F'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') ) # layernorm 1 (self-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', F'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', F'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') ) # layernorm 3 (final layernorm) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', F'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') ) rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') ) # heads on top rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') ) rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') ) rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') ) 
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') ) for i in range(3 ): rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', F'''mask_embedder.{i}.0.weight''') ) rename_keys.append((F'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', F'''mask_embedder.{i}.0.bias''') ) # fmt: on return rename_keys def lowercase__ ( snake_case_ :List[str] , snake_case_ :int , snake_case_ :Union[str, Any] ): __UpperCAmelCase = dct.pop(snake_case_ ) __UpperCAmelCase = val def lowercase__ ( snake_case_ :Optional[int] , snake_case_ :Any ): __UpperCAmelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): __UpperCAmelCase = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) __UpperCAmelCase = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''' ) __UpperCAmelCase = state_dict.pop(F'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase = in_proj_weight[:dim, :] __UpperCAmelCase = in_proj_bias[: dim] __UpperCAmelCase = in_proj_weight[ dim : dim * 2, : ] __UpperCAmelCase = in_proj_bias[ dim : dim * 2 ] __UpperCAmelCase = in_proj_weight[ -dim :, : ] __UpperCAmelCase = in_proj_bias[-dim :] # fmt: on def lowercase__ ( snake_case_ :Optional[Any] , snake_case_ :str ): # fmt: off __UpperCAmelCase = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) __UpperCAmelCase = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''' ) __UpperCAmelCase = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase = in_proj_weight[: hidden_size, :] __UpperCAmelCase = in_proj_bias[:config.hidden_size] __UpperCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __UpperCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __UpperCAmelCase = in_proj_weight[-hidden_size :, :] __UpperCAmelCase = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) __UpperCAmelCase = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''' ) __UpperCAmelCase = state_dict.pop(F'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict __UpperCAmelCase = in_proj_weight[: hidden_size, :] __UpperCAmelCase = in_proj_bias[:config.hidden_size] __UpperCAmelCase = in_proj_weight[hidden_size : hidden_size * 2, :] __UpperCAmelCase = in_proj_bias[hidden_size : hidden_size * 2] __UpperCAmelCase = in_proj_weight[-hidden_size :, :] __UpperCAmelCase = in_proj_bias[-hidden_size :] # fmt: on def lowercase__ ( ): __UpperCAmelCase = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __UpperCAmelCase = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def lowercase__ ( snake_case_ :str , snake_case_ :str , snake_case_ :str , 
snake_case_ :bool = False ): __UpperCAmelCase = get_maskformer_config(snake_case_ ) # load original state_dict with open(snake_case_ , '''rb''' ) as f: __UpperCAmelCase = pickle.load(snake_case_ ) __UpperCAmelCase = data['''model'''] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys __UpperCAmelCase = create_rename_keys(snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) read_in_swin_q_k_v(snake_case_ , config.backbone_config ) read_in_decoder_q_k_v(snake_case_ , snake_case_ ) # update to torch tensors for key, value in state_dict.items(): __UpperCAmelCase = torch.from_numpy(snake_case_ ) # load 🤗 model __UpperCAmelCase = MaskFormerForInstanceSegmentation(snake_case_ ) model.eval() for name, param in model.named_parameters(): print(snake_case_ , param.shape ) __UpperCAmelCase , __UpperCAmelCase = model.load_state_dict(snake_case_ , strict=snake_case_ ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(snake_case_ ) == 0, F'''Unexpected keys: {unexpected_keys}''' # verify results __UpperCAmelCase = prepare_img() if "vistas" in model_name: __UpperCAmelCase = 65 elif "cityscapes" in model_name: __UpperCAmelCase = 65_535 else: __UpperCAmelCase = 255 __UpperCAmelCase = True if '''ade''' in model_name else False __UpperCAmelCase = MaskFormerImageProcessor(ignore_index=snake_case_ , reduce_labels=snake_case_ ) __UpperCAmelCase = image_processor(snake_case_ , return_tensors='''pt''' ) __UpperCAmelCase = model(**snake_case_ ) print('''Logits:''' , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": __UpperCAmelCase = torch.tensor( [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=1E-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) model.save_pretrained(snake_case_ ) image_processor.save_pretrained(snake_case_ ) if push_to_hub: print('''Pushing model and image processor to the hub...''' ) model.push_to_hub(F'''nielsr/{model_name}''' ) image_processor.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": _lowercase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='maskformer-swin-tiny-ade', type=str, help=('Name of the MaskFormer model you\'d like to convert',), ) parser.add_argument( '--checkpoint_path', default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl', type=str, help='Path to the original state dict (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowercase : int = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
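The conversion script above is built on a single pop-and-reinsert primitive for checkpoint keys; a tiny self-contained illustration of that pattern (toy state dict, no real checkpoint):

def rename_key(state_dict: dict, old: str, new: str) -> None:
    # Remove the tensor under its original name and re-register it under the new one.
    state_dict[new] = state_dict.pop(old)


sd = {"backbone.patch_embed.norm.weight": "tensor-placeholder"}
rename_key(
    sd,
    "backbone.patch_embed.norm.weight",
    "model.pixel_level_module.encoder.model.embeddings.norm.weight",
)
print(sd)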
49
'''simple docstring''' import functools def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : list[int]): # Validation if not isinstance(lowerCamelCase , lowerCamelCase) or not all(isinstance(lowerCamelCase , lowerCamelCase) for day in days): raise ValueError("""The parameter days should be a list of integers""") if len(lowerCamelCase) != 3 or not all(isinstance(lowerCamelCase , lowerCamelCase) for cost in costs): raise ValueError("""The parameter costs should be a list of three integers""") if len(lowerCamelCase) == 0: return 0 if min(lowerCamelCase) <= 0: raise ValueError("""All days elements should be greater than 0""") if max(lowerCamelCase) >= 366: raise ValueError("""All days elements should be less than 366""") A_ : Tuple = set(lowerCamelCase) @functools.cache def dynamic_programming(lowerCamelCase : int) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1) return min( costs[0] + dynamic_programming(index + 1) , costs[1] + dynamic_programming(index + 7) , costs[2] + dynamic_programming(index + 30) , ) return dynamic_programming(1) if __name__ == "__main__": import doctest doctest.testmod()
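The function above is the classic minimum-cost-for-tickets dynamic program; since its names were rewritten, here is a self-contained restatement with a known test case (the function name is mine):

import functools


def minimum_tickets_cost(days: list[int], costs: list[int]) -> int:
    # costs = [1-day, 7-day, 30-day] pass prices; days = travel days within [1, 365].
    days_set = set(days)

    @functools.cache
    def dp(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dp(index + 1)
        return min(
            costs[0] + dp(index + 1),
            costs[1] + dp(index + 7),
            costs[2] + dp(index + 30),
        )

    return dp(1)


print(minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11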
665
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCamelCase : Optional[int] = { 'configuration_blenderbot': [ 'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotConfig', 'BlenderbotOnnxConfig', ], 'tokenization_blenderbot': ['BlenderbotTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = ['BlenderbotTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : List[str] = [ 'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotForCausalLM', 'BlenderbotForConditionalGeneration', 'BlenderbotModel', 'BlenderbotPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Tuple = [ 'TFBlenderbotForConditionalGeneration', 'TFBlenderbotModel', 'TFBlenderbotPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase : Optional[int] = [ 'FlaxBlenderbotForConditionalGeneration', 'FlaxBlenderbotModel', 'FlaxBlenderbotPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys UpperCamelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
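The `_LazyModule` pattern above defers each submodule import until first attribute access; a minimal sketch of the same idea using `importlib` (the class name is illustrative, not the real `_LazyModule` API):

import importlib


class LazyAttribute:
    """Resolve module.attr only when .load() is first called."""

    def __init__(self, module_name: str, attribute: str):
        self._module_name = module_name
        self._attribute = attribute
        self._cached = None

    def load(self):
        if self._cached is None:
            module = importlib.import_module(self._module_name)
            self._cached = getattr(module, self._attribute)
        return self._cached


print(LazyAttribute("math", "pi").load())  # 3.141592653589793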
50
'''simple docstring''' from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def lowerCamelCase ( lowerCamelCase : NDArray[floataa] , lowerCamelCase : NDArray[floataa] , lowerCamelCase : list[int] , lowerCamelCase : int , ): A_ , A_ : int = coefficient_matrix.shape A_ , A_ : Union[str, Any] = constant_matrix.shape if rowsa != colsa: A_ : Any = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}' raise ValueError(lowerCamelCase) if colsa != 1: A_ : Tuple = F'Constant matrix must be nx1 but received {rowsa}x{colsa}' raise ValueError(lowerCamelCase) if rowsa != rowsa: A_ : Dict = ( """Coefficient and constant matrices dimensions must be nxn and nx1 but """ F'received {rowsa}x{colsa} and {rowsa}x{colsa}' ) raise ValueError(lowerCamelCase) if len(lowerCamelCase) != rowsa: A_ : Union[str, Any] = ( """Number of initial values must be equal to number of rows in coefficient """ F'matrix but received {len(lowerCamelCase)} and {rowsa}' ) raise ValueError(lowerCamelCase) if iterations <= 0: raise ValueError("""Iterations must be at least 1""") A_ : NDArray[floataa] = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1) A_ , A_ : int = table.shape strictly_diagonally_dominant(lowerCamelCase) # Iterates the whole matrix for given number of times for _ in range(lowerCamelCase): A_ : List[Any] = [] for row in range(lowerCamelCase): A_ : int = 0 for col in range(lowerCamelCase): if col == row: A_ : List[str] = table[row][col] elif col == cols - 1: A_ : str = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] A_ : Union[str, Any] = (temp + val) / denom new_val.append(lowerCamelCase) A_ : Tuple = new_val return [float(lowerCamelCase) for i in new_val] def lowerCamelCase ( lowerCamelCase : NDArray[floataa]): A_ , A_ : Dict = table.shape A_ : Union[str, Any] = True for i in range(0 , lowerCamelCase): A_ : str = 0 for j in range(0 , cols - 1): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError("""Coefficient matrix is not strictly diagonally dominant""") return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
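A compact, vectorized restatement of the Jacobi update performed row-by-row above (the function name is mine), checked on a strictly diagonally dominant 2x2 system whose exact solution is x = y = 1:

import numpy as np


def jacobi(a: np.ndarray, b: np.ndarray, x0: np.ndarray, iterations: int) -> np.ndarray:
    d = np.diag(a)          # diagonal entries a_ii
    r = a - np.diagflat(d)  # off-diagonal remainder
    x = x0.astype(float)
    for _ in range(iterations):
        # x_i^(k+1) = (b_i - sum_{j != i} a_ij * x_j^(k)) / a_ii
        x = (b - r @ x) / d
    return x


A = np.array([[4.0, 1.0], [2.0, 3.0]])
print(jacobi(A, np.array([5.0, 5.0]), np.zeros(2), 25))  # ~ [1. 1.]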
665
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Optional[int] = logging.get_logger(__name__) a__ : Optional[int] = { 'facebook/s2t-small-librispeech-asr': ( 'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json' ), # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text } class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase ="speech_to_text" _lowerCamelCase =["past_key_values"] _lowerCamelCase ={"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self : Dict , a__ : Any=10000 , a__ : Union[str, Any]=12 , a__ : List[str]=2048 , a__ : List[Any]=4 , a__ : Optional[Any]=6 , a__ : Dict=2048 , a__ : Optional[int]=4 , a__ : Dict=0.0 , a__ : List[str]=0.0 , a__ : Dict=True , a__ : str=True , a__ : Tuple="relu" , a__ : str=256 , a__ : Tuple=0.1 , a__ : Tuple=0.0 , a__ : List[str]=0.0 , a__ : Dict=0.02 , a__ : List[Any]=2 , a__ : Union[str, Any]=True , a__ : Optional[Any]=1 , a__ : Tuple=0 , a__ : Optional[int]=2 , a__ : List[str]=6000 , a__ : List[str]=1024 , a__ : Dict=2 , a__ : int=(5, 5) , a__ : Union[str, Any]=1024 , a__ : Optional[Any]=80 , a__ : Tuple=1 , **a__ : Optional[int] , ): UpperCAmelCase = vocab_size UpperCAmelCase = d_model UpperCAmelCase = encoder_ffn_dim UpperCAmelCase = encoder_layers UpperCAmelCase = encoder_attention_heads UpperCAmelCase = decoder_ffn_dim UpperCAmelCase = decoder_layers UpperCAmelCase = decoder_attention_heads UpperCAmelCase = dropout UpperCAmelCase = attention_dropout UpperCAmelCase = activation_dropout UpperCAmelCase = activation_function UpperCAmelCase = init_std UpperCAmelCase = encoder_layerdrop UpperCAmelCase = decoder_layerdrop UpperCAmelCase = use_cache UpperCAmelCase = encoder_layers UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True UpperCAmelCase = max_source_positions UpperCAmelCase = max_target_positions UpperCAmelCase = num_conv_layers UpperCAmelCase = list(a__ ) UpperCAmelCase = conv_channels UpperCAmelCase = input_feat_per_channel UpperCAmelCase = input_channels if len(self.conv_kernel_sizes ) != self.num_conv_layers: raise ValueError( '''Configuration for convolutional module is incorrect. ''' '''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` ''' f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, " f"`config.num_conv_layers = {self.num_conv_layers}`." ) super().__init__( pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , is_encoder_decoder=a__ , decoder_start_token_id=a__ , **a__ , )
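A hedged usage sketch for the configuration class above (assumes `transformers` is installed); the constructor raises when the kernel-size list and convolution-layer count disagree:

from transformers import Speech2TextConfig

config = Speech2TextConfig()  # defaults matching the signature above
print(config.d_model, config.max_source_positions)  # 256 6000

try:
    Speech2TextConfig(num_conv_layers=3, conv_kernel_sizes=(5, 5))
except ValueError as err:
    print(err)  # len(conv_kernel_sizes) does not match num_conv_layers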
51
'''simple docstring''' def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : str): A_ : Any = len(lowerCamelCase) A_ : Optional[Any] = len(lowerCamelCase) A_ : Optional[int] = [[False for _ in range(m + 1)] for _ in range(n + 1)] A_ : Union[str, Any] = True for i in range(lowerCamelCase): for j in range(m + 1): if dp[i][j]: if j < m and a[i].upper() == b[j]: A_ : Optional[int] = True if a[i].islower(): A_ : List[Any] = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
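The table-filling routine above solves the classic string-abbreviation problem: dp[i][j] records whether the first i characters of `a` can produce the first j characters of `b` by uppercasing some lowercase letters and deleting the rest. A self-contained restatement with two quick checks (the function name is mine):

def abbreviation(a: str, b: str) -> bool:
    n, m = len(a), len(b)
    dp = [[False] * (m + 1) for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                # Uppercase a[i] to match b[j], or delete a[i] if it is lowercase.
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


print(abbreviation("daBcd", "ABC"))  # True
print(abbreviation("dBcd", "ABC"))   # False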
665
0
"""simple docstring""" from collections import deque def __A ( a_ :Dict) -> int: __a : int = len(a_) __a : Any = deque() __a : Union[str, Any] = [False for _ in range(a_)] __a : Any = [-1 for _ in range(a_)] __a : Optional[Any] = index_of[:] def strong_connect(a_ :str , a_ :int , a_ :Union[str, Any]): __a : List[str] = index # the number when this node is seen __a : int = index # lowest rank node reachable from here index += 1 stack.append(a_) __a : str = True for w in g[v]: if index_of[w] == -1: __a : Any = strong_connect(a_ , a_ , a_) __a : Optional[int] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: __a : Tuple = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: __a : Dict = [] __a : Dict = stack.pop() __a : int = False component.append(a_) while w != v: __a : int = stack.pop() __a : List[str] = False component.append(a_) components.append(a_) return index __a : List[str] = [] for v in range(a_): if index_of[v] == -1: strong_connect(a_ , 0 , a_) return components def __A ( a_ :int , a_ :str) -> Dict: __a : Dict = [[] for _ in range(a_)] for u, v in edges: g[u].append(a_) return g if __name__ == "__main__": # Test A = 7 A = [0, 0, 1, 2, 3, 3, 4, 4, 6] A = [1, 3, 2, 0, 1, 4, 5, 6, 5] A = [(u, v) for u, v in zip(source, target)] A = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
52
'''simple docstring''' from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class __lowerCAmelCase : '''simple docstring''' a_ = 42 a_ = 42 class __lowerCAmelCase : '''simple docstring''' def __init__( self : Union[str, Any] ,_a : int ): '''simple docstring''' A_ : list[list[Edge]] = [[] for _ in range(_a )] A_ : List[Any] = size def __getitem__( self : int ,_a : int ): '''simple docstring''' return iter(self._graph[vertex] ) @property def _a ( self : str ): '''simple docstring''' return self._size def _a ( self : str ,_a : int ,_a : int ,_a : int ): '''simple docstring''' if weight not in (0, 1): raise ValueError("""Edge weight must be either 0 or 1.""" ) if to_vertex < 0 or to_vertex >= self.size: raise ValueError("""Vertex indexes must be in [0; size).""" ) self._graph[from_vertex].append(Edge(_a ,_a ) ) def _a ( self : Dict ,_a : int ,_a : int ): '''simple docstring''' A_ : Tuple = deque([start_vertex] ) A_ : list[int | None] = [None] * self.size A_ : Union[str, Any] = 0 while queue: A_ : List[Any] = queue.popleft() A_ : Tuple = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: A_ : Union[str, Any] = current_distance + edge.weight A_ : Optional[Any] = distances[edge.destination_vertex] if ( isinstance(_a ,_a ) and new_distance >= dest_vertex_distance ): continue A_ : Tuple = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex ) else: queue.append(edge.destination_vertex ) if distances[finish_vertex] is None: raise ValueError("""No path from start_vertex to finish_vertex.""" ) return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
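A compact sketch of the 0-1 BFS the class above implements: zero-weight edges go to the front of the deque so vertices are dequeued in nondecreasing distance order (the function name is mine):

from collections import deque


def zero_one_bfs(graph: list[list[tuple[int, int]]], start: int, finish: int) -> int:
    # graph[v] holds (destination, weight) pairs with weight in {0, 1}.
    distances = [None] * len(graph)
    distances[start] = 0
    queue = deque([start])
    while queue:
        vertex = queue.popleft()
        for destination, weight in graph[vertex]:
            new_distance = distances[vertex] + weight
            if distances[destination] is None or new_distance < distances[destination]:
                distances[destination] = new_distance
                if weight == 0:
                    queue.appendleft(destination)
                else:
                    queue.append(destination)
    return distances[finish]


graph = [[(1, 0), (2, 1)], [(2, 1)], []]
print(zero_one_bfs(graph, 0, 2))  # 1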
665
0
import collections import json import os import re from typing import TYPE_CHECKING, List, Optional, Tuple import numpy as np from ...tokenization_utils_fast import PreTrainedTokenizer from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _snake_case : Optional[Any] = logging.get_logger(__name__) _snake_case : Optional[int] = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'} _snake_case : Dict = { 'vocab_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt', }, 'emoji_file': { 'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json', }, } _snake_case : str = { 'abeja/gpt-neox-japanese-2.7b': 2048, } def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : List[str] ): with open(lowerCAmelCase_, 'r', encoding='utf-8' ) as f: __lowerCAmelCase = json.loads(f.read() ) __lowerCAmelCase = collections.OrderedDict() __lowerCAmelCase = collections.OrderedDict() __lowerCAmelCase = collections.OrderedDict() with open(lowerCAmelCase_, 'r', encoding='utf-8' ) as f: __lowerCAmelCase = f.readlines() __lowerCAmelCase = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token] for idx, b in enumerate(lowerCAmelCase_ ): __lowerCAmelCase = b __lowerCAmelCase = idx for wd in b: __lowerCAmelCase = idx return vocab, raw_vocab, ids_to_tokens, emoji class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] def __init__( self : Tuple , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Tuple="<|endoftext|>" , lowerCAmelCase_ : Optional[int]="<|endoftext|>" , lowerCAmelCase_ : Optional[Any]="<|startoftext|>" , lowerCAmelCase_ : Optional[Any]="<|endoftext|>" , lowerCAmelCase_ : Union[str, Any]=False , **lowerCAmelCase_ : List[str] , ) -> Any: super().__init__( unk_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , do_clean_text=lowerCAmelCase_ , **lowerCAmelCase_ , ) if not os.path.isfile(lowerCAmelCase_ ): raise ValueError( f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained""" ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) if not os.path.isfile(lowerCAmelCase_ ): raise ValueError( f"""Can't find a emoji file at path '{emoji_file}'. 
To load the emoji information from a Google""" ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' ) __lowerCAmelCase = do_clean_text __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = load_vocab_and_emoji(lowerCAmelCase_ , lowerCAmelCase_ ) __lowerCAmelCase = SubWordJapaneseTokenizer( vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji ) @property def lowercase ( self : List[Any] ) -> List[Any]: # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab return len(self.raw_vocab ) def lowercase ( self : Any ) -> Tuple: return dict(self.raw_vocab , **self.added_tokens_encoder ) def lowercase ( self : Any , lowerCAmelCase_ : Optional[Any] ) -> Tuple: return self.subword_tokenizer.tokenize(lowerCAmelCase_ , clean=self.do_clean_text ) def lowercase ( self : Optional[int] , lowerCAmelCase_ : Dict ) -> Optional[int]: return self.vocab.get(lowerCAmelCase_ , self.vocab.get(self.unk_token ) ) def lowercase ( self : List[str] , lowerCAmelCase_ : int ) -> int: return self.subword_tokenizer.convert_id_to_token(lowerCAmelCase_ ) def lowercase ( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> Optional[int]: __lowerCAmelCase = ''.join(lowerCAmelCase_ ).strip() return out_string def lowercase ( self : List[str] , lowerCAmelCase_ : "Conversation" ) -> List[int]: __lowerCAmelCase = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) + [self.eos_token_id] ) if len(lowerCAmelCase_ ) > self.model_max_length: __lowerCAmelCase = input_ids[-self.model_max_length :] return input_ids def lowercase ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[str] = None ) -> Tuple[str]: __lowerCAmelCase = 0 if os.path.isdir(lowerCAmelCase_ ): __lowerCAmelCase = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) __lowerCAmelCase = os.path.join( lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] ) else: __lowerCAmelCase = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file'] ) __lowerCAmelCase = ( (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file'] ) with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: for token_index, token in self.ids_to_tokens.items(): if index != token_index: logger.warning( f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.""" ' Please check that the vocabulary is not corrupted!' 
) __lowerCAmelCase = token_index writer.write(','.join(lowerCAmelCase_ ) + '\n' ) index += 1 with open(lowerCAmelCase_ , 'w' , encoding='utf-8' ) as writer: json.dump(self.emoji , lowerCAmelCase_ ) return vocab_file, emoji_file class _UpperCAmelCase ( _UpperCamelCase ): """simple docstring""" def __init__( self : Any , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple , lowerCAmelCase_ : List[str] ) -> str: __lowerCAmelCase = vocab # same as swe __lowerCAmelCase = ids_to_tokens # same as bpe __lowerCAmelCase = emoji __lowerCAmelCase = np.max([len(lowerCAmelCase_ ) for w in self.vocab.keys()] ) __lowerCAmelCase = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' ) __lowerCAmelCase = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' ) __lowerCAmelCase = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' ) __lowerCAmelCase = re.compile( R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) __lowerCAmelCase = re.compile( R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' ) __lowerCAmelCase = re.compile( R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' ) __lowerCAmelCase = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿' __lowerCAmelCase = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟' __lowerCAmelCase = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} ) def __len__( self : Union[str, Any] ) -> int: return len(self.ids_to_tokens ) def lowercase ( self : Optional[int] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: __lowerCAmelCase = self.content_repattera.sub('<URL>' , lowerCAmelCase_ ) __lowerCAmelCase = self.content_repattera.sub('<EMAIL>' , lowerCAmelCase_ ) __lowerCAmelCase = self.content_repattera.sub('<TEL>' , lowerCAmelCase_ ) __lowerCAmelCase = self.content_repattera.sub('<DATE>' , lowerCAmelCase_ ) __lowerCAmelCase = self.content_repattera.sub('<DATE>' , lowerCAmelCase_ ) __lowerCAmelCase = self.content_repattera.sub('<PRICE>' , lowerCAmelCase_ ) __lowerCAmelCase = content.translate(self.content_transa ) while "<BLOCK><BLOCK>" in content: __lowerCAmelCase = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' ) return content def lowercase ( self : List[str] , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : List[Any]=False ) -> Any: __lowerCAmelCase = text.replace(' ' , '<SP>' ) __lowerCAmelCase = text.replace(' ' , '<SP>' ) __lowerCAmelCase = text.replace('\r\n' , '<BR>' ) __lowerCAmelCase = text.replace('\n' , '<BR>' ) __lowerCAmelCase = text.replace('\r' , '<BR>' ) __lowerCAmelCase = text.replace('\t' , '<TAB>' ) __lowerCAmelCase = text.replace('—' , 'ー' ) __lowerCAmelCase = text.replace('−' , 'ー' ) for k, v in self.emoji["emoji"].items(): if k in text: __lowerCAmelCase = text.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if clean: __lowerCAmelCase = self.clean_text(lowerCAmelCase_ ) def check_simbol(lowerCAmelCase_ : List[str] ): __lowerCAmelCase = x.encode() if len(lowerCAmelCase_ ) == 1 and len(lowerCAmelCase_ ) == 2: __lowerCAmelCase = (int(e[0] ) << 8) + int(e[1] ) if ( (c >= 0Xc2_a1 and c <= 0Xc2_bf) or (c >= 0Xc7_80 and c <= 0Xc7_83) or (c >= 
0Xca_b9 and c <= 0Xcb_bf) or (c >= 0Xcc_80 and c <= 0Xcd_a2) ): return True return False def checkuae(lowerCAmelCase_ : List[str] ): __lowerCAmelCase = x.encode() if len(lowerCAmelCase_ ) == 1 and len(lowerCAmelCase_ ) == 3: __lowerCAmelCase = (int(e[0] ) << 1_6) + (int(e[1] ) << 8) + int(e[2] ) if c >= 0Xe2_80_80 and c <= 0Xe2_b0_7f: return True return False __lowerCAmelCase = 0 __lowerCAmelCase = [] while pos < len(lowerCAmelCase_ ): __lowerCAmelCase = min(len(lowerCAmelCase_ ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3 __lowerCAmelCase = [] # (token_id, token, pos) for e in range(lowerCAmelCase_ , lowerCAmelCase_ , -1 ): __lowerCAmelCase = text[pos:e] if wd in self.vocab: if wd[0] == "<" and len(lowerCAmelCase_ ) > 2: __lowerCAmelCase = [(self.vocab[wd], wd, e)] break else: candidates.append((self.vocab[wd], wd, e) ) if len(lowerCAmelCase_ ) > 0: # the smallest token_id is adopted __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x[0] )[0] result.append(lowerCAmelCase_ ) __lowerCAmelCase = e else: __lowerCAmelCase = pos + 1 __lowerCAmelCase = text[pos:end] if check_simbol(lowerCAmelCase_ ): result.append('<KIGOU>' ) elif checkuae(lowerCAmelCase_ ): result.append('<U2000U2BFF>' ) else: for i in wd.encode('utf-8' ): result.append('<|byte%d|>' % i ) __lowerCAmelCase = end return result def lowercase ( self : Dict , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[Any]="\n" ) -> Tuple: __lowerCAmelCase = [] __lowerCAmelCase = [] __lowerCAmelCase = self.ids_to_tokens[index][0] if word[:6] == "<|byte" and word[-2:] == "|>": byte_tokens.append(int(word[6:-2] ) ) else: if len(lowerCAmelCase_ ) > 0: words.append(bytearray(lowerCAmelCase_ ).decode('utf-8' , errors='replace' ) ) __lowerCAmelCase = [] if word[:7] == "<|emoji" and word[-2:] == "|>": words.append(self.emoji['emoji_inv'][word] ) elif word == "<SP>": words.append(' ' ) elif word == "<BR>": words.append(lowerCAmelCase_ ) elif word == "<TAB>": words.append('\t' ) elif word == "<BLOCK>": words.append('▀' ) elif word == "<KIGOU>": words.append('ǀ' ) elif word == "<U2000U2BFF>": words.append('‖' ) else: words.append(lowerCAmelCase_ ) if len(lowerCAmelCase_ ) > 0: words.append(bytearray(lowerCAmelCase_ ).decode('utf-8' , errors='replace' ) ) __lowerCAmelCase = ''.join(lowerCAmelCase_ ) return text
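A hedged usage sketch for the tokenizer above (assumes `transformers` is installed and the hub is reachable); the subword tokenizer normalizes whitespace, URLs, and emoji before vocabulary lookup:

from transformers import GPTNeoXJapaneseTokenizer

tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
ids = tokenizer.encode("こんにちは、世界")
print(ids)
print(tokenizer.decode(ids))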
53
def solution(max_perimeter: int = 10**9) -> int:
    """Sum the perimeters of all almost-equilateral Heronian triangles
    (integer sides a, a, a +/- 1 and integer area) whose perimeter does not
    exceed ``max_perimeter`` (Project Euler problem 94)."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        # Alternate between the (a, a, a + 1) and (a, a, a - 1) families.
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
665
0
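The Project Euler solution in this record generates the almost-equilateral Heronian triangles, (5, 5, 6), (17, 17, 16), (65, 65, 66), ..., from a recurrence. As a cross-check, a brute force over Heron's formula (my own sketch, only feasible for small limits) reproduces the same perimeter sums:

from math import isqrt


def brute_force(max_perimeter: int) -> int:
    """Sum perimeters of triangles with sides (a, a, a +/- 1) and integer area."""
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            perimeter = 2 * a + c
            if perimeter > max_perimeter:
                continue
            # Heron's formula in integers: c^2 * (4a^2 - c^2) == (4 * area)^2.
            val = c * c * (4 * a * a - c * c)
            root = isqrt(val)
            if root > 0 and root * root == val and root % 4 == 0:
                total += perimeter
    return total


# (5,5,6), (17,17,16), (65,65,66) and (241,241,240) are the only cases <= 1000.
assert brute_force(1000) == 16 + 50 + 196 + 722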
import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A ( __lowercase , unittest.TestCase ): _snake_case =KandinskyVaaControlnetPipeline _snake_case =['''image_embeds''', '''negative_image_embeds''', '''hint'''] _snake_case =['''image_embeds''', '''negative_image_embeds''', '''hint'''] _snake_case =[ '''generator''', '''height''', '''width''', '''latents''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] _snake_case =False @property def lowerCAmelCase__ ( self: int ) -> Dict: '''simple docstring''' return 32 @property def lowerCAmelCase__ ( self: Union[str, Any] ) -> str: '''simple docstring''' return 32 @property def lowerCAmelCase__ ( self: List[Any] ) -> Dict: '''simple docstring''' return self.time_input_dim @property def lowerCAmelCase__ ( self: int ) -> int: '''simple docstring''' return self.time_input_dim * 4 @property def lowerCAmelCase__ ( self: Tuple ) -> Dict: '''simple docstring''' return 100 @property def lowerCAmelCase__ ( self: Dict ) -> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase_ ={ "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCAmelCase_ =UNetaDConditionModel(**_lowerCAmelCase ) return model @property def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[int]: '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCAmelCase__ ( self: List[str] ) -> Dict: '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase_ =VQModel(**self.dummy_movq_kwargs ) return model def lowerCAmelCase__ ( self: List[str] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ =self.dummy_unet UpperCAmelCase_ =self.dummy_movq UpperCAmelCase_ =DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_lowerCAmelCase , ) UpperCAmelCase_ ={ "unet": unet, "scheduler": scheduler, "movq": movq, } 
return components def lowerCAmelCase__ ( self: Dict , _lowerCAmelCase: Optional[int] , _lowerCAmelCase: List[Any]=0 ) -> List[str]: '''simple docstring''' UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) UpperCAmelCase_ =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( _lowerCAmelCase ) # create hint UpperCAmelCase_ =floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase ) if str(_lowerCAmelCase ).startswith("mps" ): UpperCAmelCase_ =torch.manual_seed(_lowerCAmelCase ) else: UpperCAmelCase_ =torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase ) UpperCAmelCase_ ={ "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def lowerCAmelCase__ ( self: Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ ="cpu" UpperCAmelCase_ =self.get_dummy_components() UpperCAmelCase_ =self.pipeline_class(**_lowerCAmelCase ) UpperCAmelCase_ =pipe.to(_lowerCAmelCase ) pipe.set_progress_bar_config(disable=_lowerCAmelCase ) UpperCAmelCase_ =pipe(**self.get_dummy_inputs(_lowerCAmelCase ) ) UpperCAmelCase_ =output.images UpperCAmelCase_ =pipe( **self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0] UpperCAmelCase_ =image[0, -3:, -3:, -1] UpperCAmelCase_ =image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) UpperCAmelCase_ =np.array( [0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class A ( unittest.TestCase ): def lowerCAmelCase__ ( self: List[Any] ) -> List[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self: int ) -> str: '''simple docstring''' UpperCAmelCase_ =load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) UpperCAmelCase_ =load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) UpperCAmelCase_ =torch.from_numpy(np.array(_lowerCAmelCase ) ).float() / 2_55.0 UpperCAmelCase_ =hint.permute(2 , 0 , 1 ).unsqueeze(0 ) UpperCAmelCase_ =KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(_lowerCAmelCase ) UpperCAmelCase_ =KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) UpperCAmelCase_ =pipeline.to(_lowerCAmelCase ) pipeline.set_progress_bar_config(disable=_lowerCAmelCase ) UpperCAmelCase_ ="A robot, 4k photo" UpperCAmelCase_ =torch.Generator(device="cuda" ).manual_seed(0 ) UpperCAmelCase_ , UpperCAmelCase_ =pipe_prior( _lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCAmelCase_ =torch.Generator(device="cuda" ).manual_seed(0 ) UpperCAmelCase_ =pipeline( 
image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , hint=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , output_type="np" , ) UpperCAmelCase_ =output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
54
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
665
0
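The accelerate CLI entry point in this record follows a standard argparse pattern: every subcommand module registers its own parser and binds a handler through `set_defaults(func=...)`, and the top-level `main` dispatches on `args.func`. A self-contained sketch of the pattern (command and function names here are illustrative, not accelerate's):

from argparse import ArgumentParser


def env_command(args):
    print("environment info...")


def env_command_parser(subparsers):
    # Each subcommand registers itself and binds its handler to "func".
    parser = subparsers.add_parser("env", help="print environment info")
    parser.set_defaults(func=env_command)


def main(argv=None):
    parser = ArgumentParser("demo CLI", usage="demo <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="command helpers")
    env_command_parser(subparsers)

    args = parser.parse_args(argv)
    if not hasattr(args, "func"):
        parser.print_help()
        raise SystemExit(1)
    args.func(args)


if __name__ == "__main__":
    main()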
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")

        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write the updated schema through __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
55
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
665
0
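The task template in this record mutates a copy of a frozen dataclass by writing through its `__dict__`, since plain attribute assignment on a frozen instance raises `FrozenInstanceError`. A standalone sketch of that trick (the `Template` class below is a placeholder, not the datasets API):

import copy
from dataclasses import FrozenInstanceError, dataclass


@dataclass(frozen=True)
class Template:
    label_column: str = "labels"


t = Template()
try:
    t.label_column = "category"  # frozen instances reject normal assignment
except FrozenInstanceError:
    pass

aligned = copy.deepcopy(t)
aligned.__dict__["label_column"] = "category"  # bypasses the frozen __setattr__
assert aligned.label_column == "category" and t.label_column == "labels"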
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable


_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
56
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
665
0
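The two `__init__` files in this record follow the lazy-import convention used across transformers: a dict maps submodules to exported names, optional backends append to it behind try/except guards, and the module is replaced by a `_LazyModule` at the end. A stripped-down sketch of just the guard logic (the exception class and availability check below are stand-ins for the library's own utilities):

class OptionalDependencyNotAvailable(Exception):
    """Stand-in for transformers' internal exception."""


def is_torch_available() -> bool:
    # Stand-in for the library's cached availability check.
    try:
        import torch  # noqa: F401

        return True
    except ImportError:
        return False


_import_structure = {"configuration_demo": ["DemoConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # torch missing: the modeling symbols are simply not advertised
else:
    _import_structure["modeling_demo"] = ["DemoModel", "DemoPreTrainedModel"]

print(_import_structure)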
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    # Difference between the hypothesis output and the expected output.
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    # Linear hypothesis: theta_0 + theta_1 * x_1 + ... + theta_n * x_n.
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    # index == -1 corresponds to the bias term (no feature multiplier).
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = parameter_vector[i] - LEARNING_RATE * cost_derivative
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
57
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
665
0
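The gradient-descent loop in this record stops when `numpy.allclose` reports that the parameter vector no longer moves between iterations. The same convergence test in a compact vectorized form (a sketch on a synthetic least-squares problem; all names and constants below are mine):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))
true_w = np.array([2.0, -1.0, 0.5])
y = X @ true_w

w = np.zeros(3)
lr = 0.1
for step in range(1000):
    grad = X.T @ (X @ w - y) / len(y)  # gradient of 0.5 * mean squared error
    w_new = w - lr * grad
    if np.allclose(w, w_new, atol=2e-6, rtol=0):  # parameters stopped moving
        break
    w = w_new

print(step, w)  # w should be close to true_w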
"""simple docstring""" import argparse import json import os from collections import OrderedDict import numpy as np import tensorflow as tf import torch def __lowerCAmelCase ( __UpperCamelCase : Tuple ): '''simple docstring''' snake_case_ : Any = os.path.join(args.tf_model_dir , """parameters.json""" ) snake_case_ : List[str] = json.loads(open(__UpperCamelCase ).read() ) if not params: raise ValueError( F'It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.' ) if not args.output.endswith(""".pt""" ): snake_case_ : Optional[Any] = args.output + """.pt""" snake_case_ : List[Any] = OrderedDict() with tf.device("""/CPU:0""" ): snake_case_ : Any = tf.train.load_checkpoint(args.tf_model_dir ) snake_case_ : Tuple = reader.get_variable_to_shape_map() for key_name in shapes.keys(): snake_case_ : Union[str, Any] = reader.get_tensor(__UpperCamelCase ).astype(np.floataa ) if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ): continue if key_name.startswith("""pasts/""" ): if key_name.startswith("""pasts/mlp""" ): snake_case_ : Any = int(key_name[9] ) elif key_name.startswith("""pasts/out""" ): snake_case_ : Union[str, Any] = 8 snake_case_ : Optional[Any] = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time snake_case_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ : Dict = torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/moe""" ): snake_case_ : Optional[int] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/switch_gating/kernel""" ): snake_case_ : Dict = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player snake_case_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ : Union[str, Any] = torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/softmlp/kernel""" ): snake_case_ : Tuple = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player snake_case_ : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ : List[str] = torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ): snake_case_ : List[Any] = key_name[-9:-7] for i in range(1_6 ): snake_case_ : int = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer) snake_case_ : Union[str, Any] = ( vnp[i].transpose([1, 0] ).copy() ) # In Mesh-Tensorflow, it is one array, so it is divided snake_case_ : Optional[Any] = torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/mlp""" ): snake_case_ : int = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/p1/kernel""" ): snake_case_ : Optional[int] = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player snake_case_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ : Optional[int] = torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/p1/bias""" ): snake_case_ : str = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player snake_case_ : Optional[int] = vnp.copy() # same because it is one dimensional snake_case_ : Any = torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/p2/kernel""" ): snake_case_ : Any = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player snake_case_ : Union[str, Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ : str = torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/p2/bias""" ): 
snake_case_ : List[Any] = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player snake_case_ : List[str] = vnp.copy() # same because it is one dimensional snake_case_ : Tuple = torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/ln""" ): snake_case_ : Optional[Any] = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): snake_case_ : Optional[int] = """model.blocks.%d.feed_forward.norm.bias""" % player snake_case_ : Tuple = vnp.copy() # same because it is one dimensional snake_case_ : Optional[Any] = torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/g""" ): snake_case_ : List[Any] = """model.blocks.%d.feed_forward.norm.weight""" % player snake_case_ : Dict = vnp.copy() # same because it is one dimensional snake_case_ : Optional[int] = torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/att""" ): snake_case_ : List[Any] = int(key_name[9:].split("""/""" )[0] ) if key_name.endswith("""/qkv/kernel""" ): snake_case_ : Tuple = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum snake_case_ : str = state[:, 0, :, :] snake_case_ : str = state[:, 1, :, :] snake_case_ : Optional[Any] = state[:, 2, :, :] snake_case_ : Any = ( state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ : str = ( state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ : List[str] = ( state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] ) .transpose([1, 0] ) .copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ : str = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player snake_case_ : Optional[Any] = torch.tensor(__UpperCamelCase ) snake_case_ : Dict = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player snake_case_ : int = torch.tensor(__UpperCamelCase ) snake_case_ : Tuple = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player snake_case_ : Tuple = torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/o/kernel""" ): snake_case_ : List[str] = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player snake_case_ : Dict = ( vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy() ) # Mesh-Tensorflow is a diagonal matrix snake_case_ : Union[str, Any] = torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/an""" ): snake_case_ : Optional[int] = int(key_name[8:].split("""/""" )[0] ) if key_name.endswith("""/b""" ): snake_case_ : Optional[int] = """model.blocks.%d.self_attn.norm.bias""" % player snake_case_ : Optional[Any] = vnp.copy() # same because it is one dimensional snake_case_ : Optional[Any] = torch.tensor(__UpperCamelCase ) elif key_name.endswith("""/g""" ): snake_case_ : str = """model.blocks.%d.self_attn.norm.weight""" % player snake_case_ : str = vnp.copy() # same because it is one dimensional snake_case_ : Dict = torch.tensor(__UpperCamelCase ) elif ( key_name.startswith("""model/wte""" ) or key_name.startswith("""model/wpe""" ) or key_name.startswith("""model/ete""" ) ): snake_case_ : Optional[int] = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[ key_name[-3:] ] snake_case_ : Union[str, Any] = """model.%s.weight""" % nlayer snake_case_ : Optional[Any] = vnp.copy() # same in embedded snake_case_ : str = torch.tensor(__UpperCamelCase ) if key_name.startswith("""model/wte""" 
): snake_case_ : Optional[int] = """lm_head.weight""" snake_case_ : Tuple = vnp.copy() # same in embedded snake_case_ : Union[str, Any] = torch.tensor(__UpperCamelCase ) elif key_name.startswith("""model/wob""" ): snake_case_ : List[str] = """final_logits_bias""" snake_case_ : Optional[int] = vnp.copy() # same in embedded snake_case_ : List[str] = state.reshape((1, -1) ) snake_case_ : str = torch.tensor(__UpperCamelCase ) elif key_name == "model/dense/kernel": snake_case_ : Tuple = """model.last_project.weight""" snake_case_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix snake_case_ : Tuple = torch.tensor(__UpperCamelCase ) elif key_name == "model/dense_1/bias": snake_case_ : Tuple = """model.last_project.bias""" snake_case_ : Tuple = vnp.copy() # same because it is one dimensional snake_case_ : int = torch.tensor(__UpperCamelCase ) torch.save(__UpperCamelCase , args.output ) if __name__ == "__main__": __lowerCAmelCase : Dict = argparse.ArgumentParser( description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter ) parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''') parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''') __lowerCAmelCase : Dict = parser.parse_args() convert_tf_gptsan_to_pt(args)
58
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove vertices with zero in-degree."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
665
0
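The topological sort in this record pops from the front of a plain list, which costs O(n) per pop; `collections.deque` keeps Kahn's algorithm linear and makes the cycle check explicit. A small variant that returns the order instead of printing it (my rewrite of the same algorithm):

from collections import deque
from typing import Dict, List, Optional


def topological_order(graph: Dict[int, List[int]]) -> Optional[List[int]]:
    indegree = {u: 0 for u in graph}
    for targets in graph.values():
        for v in targets:
            indegree[v] += 1
    queue = deque(u for u, deg in indegree.items() if deg == 0)
    order = []
    while queue:
        u = queue.popleft()  # O(1), unlike list.pop(0)
        order.append(u)
        for v in graph[u]:
            indegree[v] -= 1
            if indegree[v] == 0:
                queue.append(v)
    return order if len(order) == len(graph) else None  # None signals a cycle


assert topological_order({0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}) is not None
assert topological_order({0: [1], 1: [0]}) is None  # 2-cycle is detected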
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __A = data_utils.TransfoXLTokenizer __A = data_utils.TransfoXLCorpus __A = data_utils __A = data_utils def lowerCAmelCase_ ( __a , __a , __a , __a ) -> List[str]: """simple docstring""" if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(__a , "rb" ) as fp: lowerCamelCase__: Optional[Any] =pickle.load(__a , encoding="latin1" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) lowerCamelCase__: Union[str, Any] =pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"] print(F"""Save vocabulary to {pytorch_vocab_dump_path}""" ) lowerCamelCase__: Any =corpus.vocab.__dict__ torch.save(__a , __a ) lowerCamelCase__: Dict =corpus.__dict__ corpus_dict_no_vocab.pop("vocab" , __a ) lowerCamelCase__: List[str] =pytorch_dump_folder_path + "/" + CORPUS_NAME print(F"""Save dataset to {pytorch_dataset_dump_path}""" ) torch.save(__a , __a ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model lowerCamelCase__: Optional[Any] =os.path.abspath(__a ) lowerCamelCase__: Dict =os.path.abspath(__a ) print(F"""Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.""" ) # Initialise PyTorch model if transfo_xl_config_file == "": lowerCamelCase__: int =TransfoXLConfig() else: lowerCamelCase__: Any =TransfoXLConfig.from_json_file(__a ) print(F"""Building PyTorch model from configuration: {config}""" ) lowerCamelCase__: List[Any] =TransfoXLLMHeadModel(__a ) lowerCamelCase__: List[str] =load_tf_weights_in_transfo_xl(__a , __a , __a ) # Save pytorch-model lowerCamelCase__: List[str] =os.path.join(__a , __a ) lowerCamelCase__: Tuple =os.path.join(__a , __a ) print(F"""Save PyTorch model to {os.path.abspath(__a )}""" ) torch.save(model.state_dict() , __a ) print(F"""Save configuration file to {os.path.abspath(__a )}""" ) with open(__a , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __A = argparse.ArgumentParser() parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the folder to store the PyTorch model or dataset/vocab.", ) parser.add_argument( "--tf_checkpoint_path", default="", type=str, help="An optional path to a TensorFlow checkpoint path to be converted.", ) parser.add_argument( "--transfo_xl_config_file", default="", type=str, help=( "An optional config json file corresponding to the pre-trained BERT model. \n" "This specifies the model architecture." ), ) parser.add_argument( "--transfo_xl_dataset_file", default="", type=str, help="An optional dataset file to be converted in a vocabulary.", ) __A = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
59
'''simple docstring''' import unittest from parameterized import parameterized from transformers import LlamaConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer class __lowerCAmelCase : '''simple docstring''' def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[str]=7 ,_a : Dict=True ,_a : List[Any]=True ,_a : Dict=False ,_a : Optional[int]=True ,_a : List[Any]=99 ,_a : Any=32 ,_a : Optional[int]=5 ,_a : List[Any]=4 ,_a : int=37 ,_a : List[Any]="gelu" ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : Any=512 ,_a : int=16 ,_a : Optional[int]=2 ,_a : Any=0.02 ,_a : Any=3 ,_a : Any=4 ,_a : List[str]=None ,): '''simple docstring''' A_ : List[str] = parent A_ : Any = batch_size A_ : Tuple = seq_length A_ : List[str] = is_training A_ : Tuple = use_input_mask A_ : Dict = use_token_type_ids A_ : List[Any] = use_labels A_ : Union[str, Any] = vocab_size A_ : Any = hidden_size A_ : str = num_hidden_layers A_ : Optional[Any] = num_attention_heads A_ : str = intermediate_size A_ : Tuple = hidden_act A_ : Any = hidden_dropout_prob A_ : Any = attention_probs_dropout_prob A_ : List[str] = max_position_embeddings A_ : int = type_vocab_size A_ : Union[str, Any] = type_sequence_label_size A_ : Any = initializer_range A_ : List[Any] = num_labels A_ : Optional[Any] = num_choices A_ : List[Any] = scope def _a ( self : Optional[int] ): '''simple docstring''' A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) A_ : int = None if self.use_input_mask: A_ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) A_ : Dict = None if self.use_token_type_ids: A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) A_ : str = None A_ : Any = None A_ : str = None if self.use_labels: A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) A_ : Any = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) A_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) A_ : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _a ( self : Optional[Any] ): '''simple docstring''' return LlamaConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,) def _a ( self : Union[str, Any] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Any ,_a : Any ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Tuple ): '''simple docstring''' A_ : Any = LlamaModel(config=_a ) model.to(_a ) model.eval() A_ : Optional[Any] = model(_a ,attention_mask=_a ) A_ : Optional[int] = model(_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def 
_a ( self : Optional[int] ,_a : int ,_a : List[str] ,_a : Any ,_a : Any ,_a : Dict ,_a : List[str] ,_a : Optional[int] ,_a : Any ,_a : List[str] ,): '''simple docstring''' A_ : List[str] = True A_ : Union[str, Any] = LlamaModel(_a ) model.to(_a ) model.eval() A_ : Tuple = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,) A_ : List[Any] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,) A_ : int = model(_a ,attention_mask=_a ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _a ( self : Any ,_a : Any ,_a : Optional[int] ,_a : List[Any] ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[int] ,_a : List[Any] ,_a : Union[str, Any] ,): '''simple docstring''' A_ : List[Any] = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() A_ : Dict = model(_a ,attention_mask=_a ,labels=_a ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _a ( self : str ,_a : List[Any] ,_a : Dict ,_a : str ,_a : Tuple ,_a : Tuple ,_a : Tuple ,_a : Optional[Any] ,_a : Dict ,_a : Union[str, Any] ,): '''simple docstring''' A_ : Optional[Any] = True A_ : Any = True A_ : Tuple = LlamaForCausalLM(config=_a ) model.to(_a ) model.eval() # first forward pass A_ : Optional[int] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,use_cache=_a ,) A_ : Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids A_ : int = ids_tensor((self.batch_size, 3) ,config.vocab_size ) A_ : List[Any] = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and A_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) A_ : int = torch.cat([input_mask, next_mask] ,dim=-1 ) A_ : List[str] = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,output_hidden_states=_a ,)["""hidden_states"""][0] A_ : Any = model( _a ,attention_mask=_a ,encoder_hidden_states=_a ,encoder_attention_mask=_a ,past_key_values=_a ,output_hidden_states=_a ,)["""hidden_states"""][0] # select random slice A_ : List[str] = ids_tensor((1,) ,output_from_past.shape[-1] ).item() A_ : str = output_from_no_past[:, -3:, random_slice_idx].detach() A_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(_a ,_a ,atol=1e-3 ) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.prepare_config_and_inputs() ( ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ( A_ ) , ) : Any = config_and_inputs A_ : int = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' a_ = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else () a_ = (LlamaForCausalLM,) if is_torch_available() else () a_ = ( { """feature-extraction""": LlamaModel, """text-classification""": LlamaForSequenceClassification, """text-generation""": LlamaForCausalLM, """zero-shot""": LlamaForSequenceClassification, } if is_torch_available() else {} ) a_ = False a_ = False def _a ( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = LlamaModelTester(self ) A_ : List[str] = ConfigTester(self ,config_class=_a ,hidden_size=37 ) 
def _a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ : int = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: A_ : Dict = type self.model_tester.create_and_check_model(*_a ) def _a ( self : List[Any] ): '''simple docstring''' A_ , A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() A_ : List[str] = 3 A_ : Any = input_dict["""input_ids"""] A_ : Union[str, Any] = input_ids.ne(1 ).to(_a ) A_ : Union[str, Any] = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A_ : List[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : int = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Dict ): '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : str = 3 A_ : Union[str, Any] = """single_label_classification""" A_ : Union[str, Any] = input_dict["""input_ids"""] A_ : List[Any] = input_ids.ne(1 ).to(_a ) A_ : Dict = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size ) A_ : List[Any] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : List[str] = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) def _a ( self : Optional[Any] ): '''simple docstring''' A_ , A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Dict = 3 A_ : Dict = """multi_label_classification""" A_ : Any = input_dict["""input_ids"""] A_ : Optional[Any] = input_ids.ne(1 ).to(_a ) A_ : List[str] = ids_tensor( [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float ) A_ : Optional[int] = LlamaForSequenceClassification(_a ) model.to(_a ) model.eval() A_ : Any = model(_a ,attention_mask=_a ,labels=_a ) self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) ) @unittest.skip("""LLaMA buffers include complex numbers, which breaks this test""" ) def _a ( self : Any ): '''simple docstring''' pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _a ( self : Optional[Any] ,_a : List[Any] ): '''simple docstring''' A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() A_ : Tuple = ids_tensor([1, 10] ,config.vocab_size ) A_ : Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] ,config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A_ : int = LlamaModel(_a ) original_model.to(_a ) original_model.eval() A_ : Tuple = original_model(_a ).last_hidden_state A_ : Union[str, Any] = original_model(_a ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights A_ : Tuple = {"""type""": scaling_type, """factor""": 10.0} A_ : int = LlamaModel(_a ) scaled_model.to(_a ) scaled_model.eval() A_ : List[Any] = scaled_model(_a ).last_hidden_state A_ : Any = scaled_model(_a ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # 
maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(_a ,_a ,atol=1e-5 ) ) else: self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(_a ,_a ,atol=1e-5 ) ) @require_torch class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Any = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : List[str] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-7b-hf""" ,device_map="""auto""" ) A_ : str = model(torch.tensor([input_ids] ) ) # Expected mean on dim = -1 A_ : Union[str, Any] = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : str = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : str ): '''simple docstring''' A_ : Dict = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-hf""" ,device_map="""auto""" ) A_ : Tuple = model(torch.tensor(_a ) ) # Expected mean on dim = -1 A_ : str = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : str = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Logits are not exactly the same, once we fix the instabalities somehow, will update!""" ) @slow def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : Union[str, Any] = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : Optional[int] = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ,device_map="""auto""" ) A_ : int = model(torch.tensor(_a ) ) # Expected mean on dim = -1 A_ : Union[str, Any] = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # slicing logits[0, 0, 0:30] # fmt: off A_ : Optional[int] = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] ) # fmt: on torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) @unittest.skip( """Logits are not exactly the same, once we fix the instabalities somehow, will update! 
Also it is gonna be a `too_slow` test""" ) @slow def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Optional[int] = [1, 306, 4658, 278, 6593, 310, 2834, 338] A_ : str = LlamaForCausalLM.from_pretrained("""meta-llama/Llama-2-70b-hf""" ,device_map="""auto""" ) A_ : Tuple = model(torch.tensor(_a ) ) A_ : Dict = torch.tensor( [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] ,dtype=torch.floataa ) torch.testing.assert_close(out.mean(-1 ) ,_a ,atol=1e-2 ,rtol=1e-2 ) # fmt: off A_ : List[str] = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] ) # fmt: on torch.testing.assert_close(out[0, 0, :30] ,_a ,atol=1e-5 ,rtol=1e-5 ) @unittest.skip("""Model is curently gated""" ) @slow def _a ( self : Tuple ): '''simple docstring''' A_ : Union[str, Any] = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi""" A_ : List[str] = """Simply put, the theory of relativity states that """ A_ : Any = LlamaTokenizer.from_pretrained("""meta-llama/Llama-2-13b-chat-hf""" ) A_ : Union[str, Any] = tokenizer.encode(_a ,return_tensors="""pt""" ) A_ : List[str] = LlamaForCausalLM.from_pretrained( """meta-llama/Llama-2-13b-chat-hf""" ,device_map="""sequential""" ,use_safetensors=_a ) # greedy generation outputs A_ : str = model.generate(_a ,max_new_tokens=64 ,top_p=_a ,temperature=1 ,do_sample=_a ) A_ : Optional[Any] = tokenizer.decode(generated_ids[0] ,skip_special_tokens=_a ) self.assertEqual(_a ,_a )
665
0
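The causal-LM tests in this record assert that a cached (past_key_values) decode matches a full forward pass on a random slice, up to a small tolerance. The bare assertion pattern, reduced to plain tensors (shapes and tolerances here are illustrative, not the test suite's API):

import torch

torch.manual_seed(0)
full = torch.randn(1, 6, 8)                      # hidden states from a full forward pass
cached = full + 1e-5 * torch.randn_like(full)    # same pass split into cached steps

# The tests compare a random slice of the overlap with a small tolerance,
# because the cached and uncached paths differ only by floating-point noise.
idx = int(torch.randint(full.shape[-1], (1,)))
torch.testing.assert_close(full[:, -3:, idx], cached[:, -3:, idx], atol=1e-3, rtol=1e-3)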
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)

    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
60
'''simple docstring''' import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets __magic_name__ = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n' __magic_name__ = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n' __magic_name__ = r'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n' @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __lowerCAmelCase ( datasets.Metric ): '''simple docstring''' def _a ( self : Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""string""" ), """references""": datasets.Value("""string""" ), } ) ,homepage="""https://github.com/hendrycks/math""" ,codebase_urls=["""https://github.com/hendrycks/math"""] ,) def _a ( self : List[Any] ,_a : Union[str, Any] ,_a : Optional[int] ): '''simple docstring''' A_ : Union[str, Any] = 0.0 for i, j in zip(_a ,_a ): n_correct += 1.0 if math_equivalence.is_equiv(_a ,_a ) else 0.0 A_ : List[str] = n_correct / len(_a ) return { "accuracy": accuracy, }
665
0
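The ElGamal generator in this record obtains its second public component as a modular inverse. Since Python 3.8, three-argument `pow` with exponent -1 computes modular inverses directly, which makes for an easy sanity check of any `find_mod_inverse` helper (toy-sized numbers below, not key-sized):

p = 1_000_003            # small prime, toy-sized for illustration only
g, d = 5, 123_457        # base and a "private" exponent
e_1 = pow(g, d, p)
e_2 = pow(e_1, -1, p)    # modular inverse via three-argument pow (Python 3.8+)
assert (e_1 * e_2) % p == 1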
class UnionFind:
    """Disjoint-set (union-find) that also tracks the size of the largest set."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Union the sets containing src and dst; return False if already joined."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # Attach the lower-ranked root under the higher-ranked one.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the root of disj_set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
61
from ....configuration_utils import PretrainedConfig
from ....utils import logging


logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}


class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
665
0
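A quick usage sketch for the disjoint-set class in this record, using the names from the cleaned-up version above; it tracks the largest merged set as elements are unioned:

sets = UnionFind([1, 1, 1, 1])  # four singleton sets of size 1
assert sets.merge(0, 1)         # {0, 1}
assert sets.merge(2, 3)         # {2, 3}
assert sets.merge(0, 2)         # {0, 1, 2, 3}
assert not sets.merge(1, 3)     # already in the same set
assert sets.max_set == 4
assert sets.get_parent(0) == sets.get_parent(3)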
import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml snake_case = NewType("""DataClass""", Any) snake_case = NewType("""DataClassType""", Any) def lowerCamelCase__ ( lowercase ): """simple docstring""" if isinstance(lowercase , lowercase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F'''Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).''' ) def lowerCamelCase__ ( lowercase ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = {str(lowercase ): choice for choice in choices} return lambda lowercase : str_to_choice.get(lowercase , lowercase ) def lowerCamelCase__ ( *, lowercase = None , lowercase = None , lowercase = dataclasses.MISSING , lowercase = dataclasses.MISSING , lowercase = None , **lowercase , ): """simple docstring""" if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls SCREAMING_SNAKE_CASE : Optional[Any] = {} if aliases is not None: SCREAMING_SNAKE_CASE : Optional[Any] = aliases if help is not None: SCREAMING_SNAKE_CASE : Tuple = help return dataclasses.field(metadata=lowercase , default=lowercase , default_factory=lowercase , **lowercase ) class SCREAMING_SNAKE_CASE ( lowerCAmelCase ): '''simple docstring''' UpperCamelCase_ : Iterable[DataClassType] def __init__( self : Optional[int] , UpperCAmelCase_ : Union[DataClassType, Iterable[DataClassType]] , **UpperCAmelCase_ : Optional[int] ): # To make the default appear when using --help if "formatter_class" not in kwargs: SCREAMING_SNAKE_CASE : Optional[int] = ArgumentDefaultsHelpFormatter super().__init__(**UpperCAmelCase_ ) if dataclasses.is_dataclass(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[Any] = [dataclass_types] SCREAMING_SNAKE_CASE : Optional[int] = list(UpperCAmelCase_ ) for dtype in self.dataclass_types: self._add_dataclass_arguments(UpperCAmelCase_ ) @staticmethod def _A ( UpperCAmelCase_ : ArgumentParser , UpperCAmelCase_ : dataclasses.Field ): SCREAMING_SNAKE_CASE : Any = f'''--{field.name}''' SCREAMING_SNAKE_CASE : Tuple = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. if isinstance(field.type , UpperCAmelCase_ ): raise RuntimeError( "Unresolved type detected, which should have been done with the help of " "`typing.get_type_hints` method by default" ) SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("aliases" , [] ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : str = [aliases] SCREAMING_SNAKE_CASE : Dict = getattr(field.type , "__origin__" , field.type ) if origin_type is Union or (hasattr(UpperCAmelCase_ , "UnionType" ) and isinstance(UpperCAmelCase_ , types.UnionType )): if str not in field.type.__args__ and ( len(field.type.__args__ ) != 2 or type(UpperCAmelCase_ ) not in field.type.__args__ ): raise ValueError( "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because" " the argument parser only supports one type per argument." 
f''' Problem encountered in field \'{field.name}\'.''' ) if type(UpperCAmelCase_ ) not in field.type.__args__: # filter `str` in Union SCREAMING_SNAKE_CASE : List[str] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] SCREAMING_SNAKE_CASE : Tuple = getattr(field.type , "__origin__" , field.type ) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) SCREAMING_SNAKE_CASE : Dict = ( field.type.__args__[0] if isinstance(UpperCAmelCase_ , field.type.__args__[1] ) else field.type.__args__[1] ) SCREAMING_SNAKE_CASE : Optional[Any] = getattr(field.type , "__origin__" , field.type ) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) SCREAMING_SNAKE_CASE : Dict = {} if origin_type is Literal or (isinstance(field.type , UpperCAmelCase_ ) and issubclass(field.type , UpperCAmelCase_ )): if origin_type is Literal: SCREAMING_SNAKE_CASE : Any = field.type.__args__ else: SCREAMING_SNAKE_CASE : Union[str, Any] = [x.value for x in field.type] SCREAMING_SNAKE_CASE : Tuple = make_choice_type_function(kwargs["choices"] ) if field.default is not dataclasses.MISSING: SCREAMING_SNAKE_CASE : Any = field.default else: SCREAMING_SNAKE_CASE : str = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument SCREAMING_SNAKE_CASE : List[str] = copy(UpperCAmelCase_ ) # Hack because type=bool in argparse does not behave as we want. SCREAMING_SNAKE_CASE : Any = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. SCREAMING_SNAKE_CASE : str = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way SCREAMING_SNAKE_CASE : Optional[int] = default # This tells argparse we accept 0 or 1 value after --field_name SCREAMING_SNAKE_CASE : Tuple = "?" # This is the value that will get picked if we do --field_name (without value) SCREAMING_SNAKE_CASE : List[Any] = True elif isclass(UpperCAmelCase_ ) and issubclass(UpperCAmelCase_ , UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : List[str] = field.type.__args__[0] SCREAMING_SNAKE_CASE : List[str] = "+" if field.default_factory is not dataclasses.MISSING: SCREAMING_SNAKE_CASE : Any = field.default_factory() elif field.default is dataclasses.MISSING: SCREAMING_SNAKE_CASE : Optional[int] = True else: SCREAMING_SNAKE_CASE : Optional[Any] = field.type if field.default is not dataclasses.MISSING: SCREAMING_SNAKE_CASE : str = field.default elif field.default_factory is not dataclasses.MISSING: SCREAMING_SNAKE_CASE : Union[str, Any] = field.default_factory() else: SCREAMING_SNAKE_CASE : Tuple = True parser.add_argument(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_ ) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. 
if field.default is True and (field.type is bool or field.type == Optional[bool]): SCREAMING_SNAKE_CASE : Tuple = False parser.add_argument(f'''--no_{field.name}''' , action="store_false" , dest=field.name , **UpperCAmelCase_ ) def _A ( self : int , UpperCAmelCase_ : DataClassType ): if hasattr(UpperCAmelCase_ , "_argument_group_name" ): SCREAMING_SNAKE_CASE : Tuple = self.add_argument_group(dtype._argument_group_name ) else: SCREAMING_SNAKE_CASE : Dict = self try: SCREAMING_SNAKE_CASE : Dict[str, type] = get_type_hints(UpperCAmelCase_ ) except NameError: raise RuntimeError( f'''Type resolution failed for {dtype}. Try declaring the class in global scope or ''' "removing line of `from __future__ import annotations` which opts in Postponed " "Evaluation of Annotations (PEP 563)" ) except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(UpperCAmelCase_ ): SCREAMING_SNAKE_CASE : Union[str, Any] = ".".join(map(UpperCAmelCase_ , sys.version_info[:3] ) ) raise RuntimeError( f'''Type resolution failed for {dtype} on Python {python_version}. Try removing ''' "line of `from __future__ import annotations` which opts in union types as " "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To " "support Python versions that lower than 3.10, you need to use " "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of " "`X | None`." ) from ex raise for field in dataclasses.fields(UpperCAmelCase_ ): if not field.init: continue SCREAMING_SNAKE_CASE : Tuple = type_hints[field.name] self._parse_dataclass_field(UpperCAmelCase_ , UpperCAmelCase_ ) def _A ( self : List[Any] , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=None , ): if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )): SCREAMING_SNAKE_CASE : Tuple = [] if args_filename: args_files.append(Path(UpperCAmelCase_ ) ) elif look_for_args_file and len(sys.argv ): args_files.append(Path(sys.argv[0] ).with_suffix(".args" ) ) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values SCREAMING_SNAKE_CASE : Dict = ArgumentParser() args_file_parser.add_argument(UpperCAmelCase_ , type=UpperCAmelCase_ , action="append" ) # Use only remaining args for further parsing (remove the args_file_flag) SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = args_file_parser.parse_known_args(args=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = vars(UpperCAmelCase_ ).get(args_file_flag.lstrip("-" ) , UpperCAmelCase_ ) if cmd_args_file_paths: args_files.extend([Path(UpperCAmelCase_ ) for p in cmd_args_file_paths] ) SCREAMING_SNAKE_CASE : Union[str, Any] = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last SCREAMING_SNAKE_CASE : Any = file_args + args if args is not None else file_args + sys.argv[1:] SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = self.parse_known_args(args=UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : Any = [] for dtype in self.dataclass_types: SCREAMING_SNAKE_CASE : Tuple = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init} SCREAMING_SNAKE_CASE : Dict = {k: v 
for k, v in vars(UpperCAmelCase_ ).items() if k in keys} for k in keys: delattr(UpperCAmelCase_ , UpperCAmelCase_ ) SCREAMING_SNAKE_CASE : int = dtype(**UpperCAmelCase_ ) outputs.append(UpperCAmelCase_ ) if len(namespace.__dict__ ) > 0: # additional namespace. outputs.append(UpperCAmelCase_ ) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f'''Some specified arguments are not used by the HfArgumentParser: {remaining_args}''' ) return (*outputs,) def _A ( self : Optional[Any] , UpperCAmelCase_ : Dict[str, Any] , UpperCAmelCase_ : bool = False ): SCREAMING_SNAKE_CASE : Tuple = set(args.keys() ) SCREAMING_SNAKE_CASE : Tuple = [] for dtype in self.dataclass_types: SCREAMING_SNAKE_CASE : Optional[Any] = {f.name for f in dataclasses.fields(UpperCAmelCase_ ) if f.init} SCREAMING_SNAKE_CASE : Dict = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys() ) SCREAMING_SNAKE_CASE : Tuple = dtype(**UpperCAmelCase_ ) outputs.append(UpperCAmelCase_ ) if not allow_extra_keys and unused_keys: raise ValueError(f'''Some keys are not used by the HfArgumentParser: {sorted(UpperCAmelCase_ )}''' ) return tuple(UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ): with open(Path(UpperCAmelCase_ ) , encoding="utf-8" ) as open_json_file: SCREAMING_SNAKE_CASE : Dict = json.loads(open_json_file.read() ) SCREAMING_SNAKE_CASE : Dict = self.parse_dict(UpperCAmelCase_ , allow_extra_keys=UpperCAmelCase_ ) return tuple(UpperCAmelCase_ ) def _A ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : bool = False ): SCREAMING_SNAKE_CASE : Any = self.parse_dict(yaml.safe_load(Path(UpperCAmelCase_ ).read_text() ) , allow_extra_keys=UpperCAmelCase_ ) return tuple(UpperCAmelCase_ )
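The parser above is the upstream HfArgumentParser: it turns dataclass fields into argparse arguments, handling Optional and Literal annotations, boolean complements, and list fields. A hedged usage sketch; TrainingArgs is a made-up dataclass, and the public method name parse_args_into_dataclasses comes from the upstream API:

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser  # assumed upstream equivalent of the class above

@dataclass
class TrainingArgs:
    learning_rate: float = field(default=3e-4, metadata={"help": "Peak learning rate."})
    do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
    run_name: Optional[str] = field(default=None, metadata={"help": "Optional run tag."})

parser = HfArgumentParser(TrainingArgs)
(args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-3", "--do_train"])
assert args.learning_rate == 1e-3 and args.do_train is True
# note: a `--no_<name>` complement is only generated for bool fields whose default is True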
62
'''simple docstring''' import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'spiece.model'} __magic_name__ = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } __magic_name__ = { 'google/bigbird-roberta-base': 4_096, 'google/bigbird-roberta-large': 4_096, 'google/bigbird-base-trivia-itc': 4_096, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = [] def __init__( self : Optional[int] ,_a : int ,_a : Optional[Any]="<unk>" ,_a : int="<s>" ,_a : str="</s>" ,_a : Optional[Any]="<pad>" ,_a : Tuple="[SEP]" ,_a : Tuple="[MASK]" ,_a : Union[str, Any]="[CLS]" ,_a : Optional[Dict[str, Any]] = None ,**_a : Any ,): '''simple docstring''' A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it A_ : List[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token A_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,mask_token=_a ,cls_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,) A_ : Optional[int] = vocab_file A_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(_a ) @property def _a ( self : Union[str, Any] ): '''simple docstring''' return self.sp_model.get_piece_size() def _a ( self : Optional[Any] ): '''simple docstring''' A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[Any] ): '''simple docstring''' A_ : Union[str, Any] = self.__dict__.copy() A_ : Union[str, Any] = None return state def __setstate__( self : List[Any] ,_a : Any ): '''simple docstring''' A_ : Tuple = d # for backward compatibility if not hasattr(self ,"""sp_model_kwargs""" ): A_ : Tuple = {} A_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _a ( self : Union[str, Any] ,_a : str ): '''simple docstring''' return self.sp_model.encode(_a ,out_type=_a ) def _a ( self : Optional[int] ,_a : str ): '''simple docstring''' return self.sp_model.piece_to_id(_a ) def _a ( self : int ,_a : Optional[int] ): '''simple docstring''' A_ : List[str] = self.sp_model.IdToPiece(_a ) return token def _a ( self : Dict ,_a : int ): '''simple docstring''' A_ : int = [] A_ : Any = """""" A_ : str = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(_a ) + token A_ : Dict = True A_ : Union[str, Any] = [] else: current_sub_tokens.append(_a ) A_ : str = False out_string += self.sp_model.decode(_a ) return out_string.strip() def _a ( self : int ,_a : List[int] ,_a : bool = False ,_a : bool = None ,_a : bool = True ,**_a : str ,): '''simple docstring''' A_ : Any = kwargs.pop("""use_source_tokenizer""" ,_a ) A_ : Union[str, Any] = self.convert_ids_to_tokens(_a ,skip_special_tokens=_a ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 A_ : str = [] A_ : int = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) A_ : List[str] = [] sub_texts.append(_a ) else: current_sub_text.append(_a ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(_a ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: A_ : Optional[int] = re.sub(r""" (\[(MASK|SEP)\])""" ,r"""\1""" ,""" """.join(_a ) ) else: A_ : Tuple = """""".join(_a ) A_ : str = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: A_ : Optional[Any] = self.clean_up_tokenization(_a ) return clean_text else: return text def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : int = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,_a ) elif not os.path.isfile(self.vocab_file ): with open(_a ,"""wb""" ) as fi: A_ : str = self.sp_model.serialized_model_proto() fi.write(_a ) return (out_vocab_file,) def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : List[Any] = [self.cls_token_id] A_ : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1] def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Tuple = [self.sep_token_id] A_ : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
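A short usage sketch for the SentencePiece tokenizer above (upstream name BigBirdTokenizer; the checkpoint id is taken from the vocab map above, and the call downloads it on first use):

from transformers import BigBirdTokenizer  # assumed upstream equivalent of the class above

tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
enc = tok("Hello world")   # [CLS] ... [SEP] added by build_inputs_with_special_tokens
text = tok.decode(enc["input_ids"], skip_special_tokens=True)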
665
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a : Tuple = { "configuration_blenderbot_small": [ "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotSmallConfig", "BlenderbotSmallOnnxConfig", ], "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : List[Any] = ["BlenderbotSmallTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Optional[Any] = [ "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotSmallForCausalLM", "BlenderbotSmallForConditionalGeneration", "BlenderbotSmallModel", "BlenderbotSmallPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = [ "TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel", "TFBlenderbotSmallPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : str = [ "FlaxBlenderbotSmallForConditionalGeneration", "FlaxBlenderbotSmallModel", "FlaxBlenderbotSmallPreTrainedModel", ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys a : Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
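The _LazyModule indirection above defers the heavy framework imports until an attribute is first touched, so importing the package stays cheap even when torch/TF/flax are installed. A stripped-down sketch of the mechanism, not the library's actual implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    """Minimal stand-in: resolves attributes to submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {"submodule": ["Name", ...]} into {"Name": "submodule"}
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again for this name
        return value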
63
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TextaTextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class __lowerCAmelCase ( unittest.TestCase ): '''simple docstring''' a_ = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING a_ = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def _a ( self : List[str] ,_a : int ,_a : Any ,_a : int ): '''simple docstring''' A_ : Dict = TextaTextGenerationPipeline(model=_a ,tokenizer=_a ) return generator, ["Something to write", "Something else"] def _a ( self : str ,_a : Union[str, Any] ,_a : int ): '''simple docstring''' A_ : Any = generator("""Something there""" ) self.assertEqual(_a ,[{"""generated_text""": ANY(_a )}] ) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) ) A_ : List[Any] = generator(["""This is great !""", """Something else"""] ,num_return_sequences=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) A_ : List[str] = generator( ["""This is great !""", """Something else"""] ,num_return_sequences=2 ,batch_size=2 ,do_sample=_a ) self.assertEqual( _a ,[ [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], [{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}], ] ,) with self.assertRaises(_a ): generator(4 ) @require_torch def _a ( self : Union[str, Any] ): '''simple docstring''' A_ : int = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""pt""" ) # do_sample=False necessary for reproducibility A_ : Tuple = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] ) A_ : Optional[int] = 3 A_ : Tuple = generator( """Something there""" ,num_return_sequences=_a ,num_beams=_a ,) A_ : Optional[Any] = [ {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""}, {"""generated_text""": """"""}, ] self.assertEqual(_a ,_a ) A_ : Optional[int] = generator("""This is a test""" ,do_sample=_a ,num_return_sequences=2 ,return_tensors=_a ) self.assertEqual( _a ,[ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ] ,) A_ : Dict = generator.model.config.eos_token_id A_ : Optional[int] = """<pad>""" A_ : List[Any] = generator( ["""This is a test""", """This is a second test"""] ,do_sample=_a ,num_return_sequences=2 ,batch_size=2 ,return_tensors=_a ,) self.assertEqual( _a ,[ [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], [ {"""generated_token_ids""": ANY(torch.Tensor )}, {"""generated_token_ids""": ANY(torch.Tensor )}, ], ] ,) @require_tf def _a ( self : List[Any] ): '''simple docstring''' A_ : Optional[int] = pipeline("""text2text-generation""" ,model="""patrickvonplaten/t5-tiny-random""" ,framework="""tf""" ) # do_sample=False necessary for reproducibility A_ : Dict = generator("""Something there""" ,do_sample=_a ) self.assertEqual(_a ,[{"""generated_text""": """"""}] )
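The behaviour the tests above pin down, written as a plain usage sketch (the tiny checkpoint is the one named in the test; running this downloads it):

from transformers import pipeline

generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
out = generator("Something there", do_sample=False)   # do_sample=False for reproducibility
# -> [{"generated_text": ...}]; encoder-decoder models do not echo the input prompt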
665
0
import torch from torch import nn from transformers import CLIPPreTrainedModel, CLIPVisionModel from ...models.attention import BasicTransformerBlock from ...utils import logging lowercase_ : int = logging.get_logger(__name__) # pylint: disable=invalid-name class _lowerCamelCase ( UpperCamelCase_ ): def __init__( self , lowerCAmelCase , lowerCAmelCase=768 ) -> Union[str, Any]: super().__init__(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[str]= proj_size SCREAMING_SNAKE_CASE__: Tuple= CLIPVisionModel(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= PaintByExampleMapper(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Tuple= nn.LayerNorm(config.hidden_size ) SCREAMING_SNAKE_CASE__: Union[str, Any]= nn.Linear(config.hidden_size , self.proj_size ) # uncondition for scaling SCREAMING_SNAKE_CASE__: Optional[Any]= nn.Parameter(torch.randn((1, 1, self.proj_size) ) ) def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=False ) -> Dict: SCREAMING_SNAKE_CASE__: str= self.model(pixel_values=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[Any]= clip_output.pooler_output SCREAMING_SNAKE_CASE__: List[Any]= self.mapper(latent_states[:, None] ) SCREAMING_SNAKE_CASE__: str= self.final_layer_norm(lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Dict= self.proj_out(lowerCAmelCase ) if return_uncond_vector: return latent_states, self.uncond_vector return latent_states class _lowerCamelCase ( nn.Module ): def __init__( self , lowerCAmelCase ) -> int: super().__init__() SCREAMING_SNAKE_CASE__: List[str]= (config.num_hidden_layers + 1) // 5 SCREAMING_SNAKE_CASE__: Tuple= config.hidden_size SCREAMING_SNAKE_CASE__: List[Any]= 1 SCREAMING_SNAKE_CASE__: List[Any]= nn.ModuleList( [ BasicTransformerBlock(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , activation_fn='''gelu''' , attention_bias=lowerCAmelCase ) for _ in range(lowerCAmelCase ) ] ) def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[Any]: for block in self.blocks: SCREAMING_SNAKE_CASE__: List[Any]= block(lowerCAmelCase ) return hidden_states
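A shape sketch for the mapper stack above, with the transformer blocks stubbed out so only the projection contract is visible; the 1024/768 sizes are assumptions standing in for config.hidden_size and proj_size:

import torch
from torch import nn

batch, hidden, proj = 2, 1024, 768
latent_states = torch.randn(batch, 1, hidden)   # pooled CLIP output, unsqueezed to a length-1 sequence
final_layer_norm = nn.LayerNorm(hidden)
proj_out = nn.Linear(hidden, proj)
out = proj_out(final_layer_norm(latent_states))
assert out.shape == (batch, 1, proj)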
64
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = { 'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json', } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = """gpt_bigcode""" a_ = ["""past_key_values"""] a_ = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Optional[int] ,_a : Optional[int]=50257 ,_a : Dict=1024 ,_a : Union[str, Any]=768 ,_a : Union[str, Any]=12 ,_a : Union[str, Any]=12 ,_a : Tuple=None ,_a : int="gelu_pytorch_tanh" ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : Union[str, Any]=0.1 ,_a : List[Any]=1e-5 ,_a : List[str]=0.02 ,_a : Any=True ,_a : Union[str, Any]=True ,_a : Tuple=50256 ,_a : Optional[int]=50256 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[int]=True ,**_a : List[str] ,): '''simple docstring''' A_ : Optional[Any] = vocab_size A_ : int = n_positions A_ : Union[str, Any] = n_embd A_ : int = n_layer A_ : Optional[int] = n_head A_ : Union[str, Any] = n_inner A_ : List[Any] = activation_function A_ : Dict = resid_pdrop A_ : int = embd_pdrop A_ : Optional[int] = attn_pdrop A_ : Union[str, Any] = layer_norm_epsilon A_ : int = initializer_range A_ : Union[str, Any] = scale_attn_weights A_ : List[str] = use_cache A_ : Tuple = attention_softmax_in_fpaa A_ : List[str] = scale_attention_softmax_in_fpaa A_ : Union[str, Any] = multi_query A_ : Any = bos_token_id A_ : Optional[int] = eos_token_id super().__init__(bos_token_id=_a ,eos_token_id=_a ,**_a )
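Usage sketch for the configuration above (upstream name GPTBigCodeConfig); multi_query=True is the flag that selects SantaCoder-style multi-query attention:

from transformers import GPTBigCodeConfig  # assumed upstream equivalent of the class above

config = GPTBigCodeConfig(n_embd=256, n_layer=4, n_head=4, multi_query=True)
print(config.model_type)   # "gpt_bigcode"
print(config.hidden_size)  # 256, resolved to n_embd via the attribute map above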
665
0
"""simple docstring""" from __future__ import annotations from typing import Any class __lowercase : def __init__( self : int ,A : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = num_of_nodes UpperCAmelCase__ : list[list[int]] = [] UpperCAmelCase__ : dict[int, int] = {} def __lowercase ( self : Any ,A : int ,A : int ,A : int ): '''simple docstring''' self.m_edges.append([u_node, v_node, weight] ) def __lowercase ( self : Tuple ,A : int ): '''simple docstring''' if self.m_component[u_node] == u_node: return u_node return self.find_component(self.m_component[u_node] ) def __lowercase ( self : List[Any] ,A : int ): '''simple docstring''' if self.m_component[u_node] != u_node: for k in self.m_component: UpperCAmelCase__ : List[Any] = self.find_component(A ) def __lowercase ( self : List[str] ,A : list[int] ,A : int ,A : int ): '''simple docstring''' if component_size[u_node] <= component_size[v_node]: UpperCAmelCase__ : Any = v_node component_size[v_node] += component_size[u_node] self.set_component(A ) elif component_size[u_node] >= component_size[v_node]: UpperCAmelCase__ : List[str] = self.find_component(A ) component_size[u_node] += component_size[v_node] self.set_component(A ) def __lowercase ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = [] UpperCAmelCase__ : str = 0 UpperCAmelCase__ : list[Any] = [-1] * self.m_num_of_nodes # A list of components (initialized to all of the nodes) for node in range(self.m_num_of_nodes ): self.m_component.update({node: node} ) component_size.append(1 ) UpperCAmelCase__ : Tuple = self.m_num_of_nodes while num_of_components > 1: for edge in self.m_edges: UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = edge UpperCAmelCase__ : Tuple = self.m_component[u] UpperCAmelCase__ : Optional[int] = self.m_component[v] if u_component != v_component: for component in (u_component, v_component): if ( minimum_weight_edge[component] == -1 or minimum_weight_edge[component][2] > w ): UpperCAmelCase__ : Optional[int] = [u, v, w] for edge in minimum_weight_edge: if isinstance(A ,A ): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Tuple = edge UpperCAmelCase__ : str = self.m_component[u] UpperCAmelCase__ : List[str] = self.m_component[v] if u_component != v_component: mst_weight += w self.union(A ,A ,A ) print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" ) num_of_components -= 1 UpperCAmelCase__ : Union[str, Any] = [-1] * self.m_num_of_nodes print(f"The total weight of the minimal spanning tree is: {mst_weight}" ) def lowerCAmelCase ( ): '''simple docstring''' if __name__ == "__main__": import doctest doctest.testmod()
65
'''simple docstring''' import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} __magic_name__ = { 'vocab_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json' ), }, 'merges_file': { 'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt', 'allenai/longformer-large-4096': ( 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-finetuned-triviaqa': ( 'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt' ), 'allenai/longformer-base-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt' ), 'allenai/longformer-large-4096-extra.pos.embd.only': ( 'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt' ), }, } __magic_name__ = { 'allenai/longformer-base-4096': 4_096, 'allenai/longformer-large-4096': 4_096, 'allenai/longformer-large-4096-finetuned-triviaqa': 4_096, 'allenai/longformer-base-4096-extra.pos.embd.only': 4_096, 'allenai/longformer-large-4096-extra.pos.embd.only': 4_096, } @lru_cache() # Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode def lowerCamelCase ( ): A_ : Union[str, Any] = ( list(range(ord("""!""") , ord("""~""") + 1)) + list(range(ord("""¡""") , ord("""¬""") + 1)) + list(range(ord("""®""") , ord("""ÿ""") + 1)) ) A_ : Optional[Any] = bs[:] A_ : List[str] = 0 for b in range(2**8): if b not in bs: bs.append(lowerCamelCase) cs.append(2**8 + n) n += 1 A_ : List[Any] = [chr(lowerCamelCase) for n in cs] return dict(zip(lowerCamelCase , lowerCamelCase)) def lowerCamelCase ( lowerCamelCase : int): A_ : int = set() A_ : int = word[0] for char in word[1:]: pairs.add((prev_char, char)) A_ : List[str] = char return pairs class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,): '''simple docstring''' A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if 
isinstance(_a ,_a ) else sep_token A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token # Mask token behave like a normal word, i.e. include the space before it A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token super().__init__( errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,) with open(_a ,encoding="""utf-8""" ) as vocab_handle: A_ : str = json.load(_a ) A_ : Optional[int] = {v: k for k, v in self.encoder.items()} A_ : List[str] = errors # how to handle errors in decoding A_ : List[str] = bytes_to_unicode() A_ : str = {v: k for k, v in self.byte_encoder.items()} with open(_a ,encoding="""utf-8""" ) as merges_handle: A_ : Any = merges_handle.read().split("""\n""" )[1:-1] A_ : str = [tuple(merge.split() ) for merge in bpe_merges] A_ : int = dict(zip(_a ,range(len(_a ) ) ) ) A_ : List[Any] = {} A_ : Optional[int] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property def _a ( self : Any ): '''simple docstring''' return len(self.encoder ) def _a ( self : str ): '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def _a ( self : int ,_a : int ): '''simple docstring''' if token in self.cache: return self.cache[token] A_ : Optional[int] = tuple(_a ) A_ : Any = get_pairs(_a ) if not pairs: return token while True: A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break A_ , A_ : Dict = bigram A_ : int = [] A_ : Optional[Any] = 0 while i < len(_a ): try: A_ : List[str] = word.index(_a ,_a ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) A_ : Tuple = j if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 A_ : str = tuple(_a ) A_ : str = new_word if len(_a ) == 1: break else: A_ : int = get_pairs(_a ) A_ : Optional[int] = """ """.join(_a ) A_ : List[str] = word return word def _a ( self : Dict ,_a : Optional[int] ): '''simple docstring''' A_ : Any = [] for token in re.findall(self.pat ,_a ): A_ : Any = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) ) return bpe_tokens def _a ( self : Union[str, Any] ,_a : Optional[int] ): '''simple docstring''' return self.encoder.get(_a ,self.encoder.get(self.unk_token ) ) def _a ( self : int ,_a : Dict ): '''simple docstring''' return self.decoder.get(_a ) def _a ( self : Optional[int] ,_a : List[Any] ): '''simple docstring''' A_ : Optional[int] = """""".join(_a ) A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors ) return text def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(_a ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return A_ : int = os.path.join( _a ,(filename_prefix + 
"""-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) A_ : int = os.path.join( _a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(_a ,"""w""" ,encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" ) A_ : int = 0 with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ): if index != token_index: logger.warning( f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.' """ Please check that the tokenizer is not corrupted!""" ) A_ : Dict = token_index writer.write(""" """.join(_a ) + """\n""" ) index += 1 return vocab_file, merge_file def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] A_ : int = [self.cls_token_id] A_ : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a ) if token_ids_a is None: return [1] + ([0] * len(_a )) + [1] return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1] def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Union[str, Any] = [self.sep_token_id] A_ : Union[str, Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ): '''simple docstring''' A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()): A_ : Optional[int] = """ """ + text return (text, kwargs)
665
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCamelCase = { "configuration_poolformer": [ "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PoolFormerConfig", "PoolFormerOnnxConfig", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = ["PoolFormerFeatureExtractor"] UpperCamelCase = ["PoolFormerImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "PoolFormerForImageClassification", "PoolFormerModel", "PoolFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure)
66
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_convbert import ConvBertTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.txt'} __magic_name__ = { 'vocab_file': { 'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt', 'YituTech/conv-bert-medium-small': ( 'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt' ), 'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt', } } __magic_name__ = { 'YituTech/conv-bert-base': 512, 'YituTech/conv-bert-medium-small': 512, 'YituTech/conv-bert-small': 512, } __magic_name__ = { 'YituTech/conv-bert-base': {'do_lower_case': True}, 'YituTech/conv-bert-medium-small': {'do_lower_case': True}, 'YituTech/conv-bert-small': {'do_lower_case': True}, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_INIT_CONFIGURATION a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ConvBertTokenizer def __init__( self : str ,_a : Dict=None ,_a : List[Any]=None ,_a : Dict=True ,_a : List[str]="[UNK]" ,_a : Any="[SEP]" ,_a : str="[PAD]" ,_a : List[Any]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Union[str, Any]=True ,_a : Any=None ,**_a : Optional[int] ,): '''simple docstring''' super().__init__( _a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,) A_ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("""lowercase""" ,_a ) != do_lower_case or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars ): A_ : Dict = getattr(_a ,normalizer_state.pop("""type""" ) ) A_ : str = do_lower_case A_ : Any = strip_accents A_ : int = tokenize_chinese_chars A_ : Tuple = normalizer_class(**_a ) A_ : Any = do_lower_case def _a ( self : List[Any] ,_a : List[Any] ,_a : Any=None ): '''simple docstring''' A_ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _a ( self : Dict ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : int = [self.sep_token_id] A_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _a ( self : int ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' A_ : List[Any] = self._tokenizer.model.save(_a ,name=_a ) return tuple(_a )
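The two sequence-building helpers at the end of the tokenizer above produce the standard BERT-style pair layout: [CLS] A [SEP] for a single segment, [CLS] A [SEP] B [SEP] for a pair, with token_type_ids switching from 0 to 1 at the second segment. A sketch with assumed ids (101/102 stand in for [CLS]/[SEP], for illustration only):

cls_id, sep_id = 101, 102            # assumed ids, for illustration only
a, b = [7, 8], [9]                   # two already-tokenised segments
input_ids = [cls_id] + a + [sep_id] + b + [sep_id]
token_type_ids = [0] * (len(a) + 2) + [1] * (len(b) + 1)
assert len(input_ids) == len(token_type_ids) == 6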
665
0
from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class A_ ( nn.Module ): """simple docstring""" def __init__( self : str ,__A : int = 16 ,__A : int = 88 ,__A : Optional[int] = None ,__A : int = 1 ,__A : float = 0.0 ,__A : int = 32 ,__A : Optional[int] = None ,__A : bool = False ,__A : Optional[int] = None ,__A : Optional[int] = None ,__A : str = "geglu" ,__A : Optional[int] = None ,) -> Optional[int]: super().__init__() _lowercase = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__A ,attention_head_dim=__A ,in_channels=__A ,num_layers=__A ,dropout=__A ,norm_num_groups=__A ,cross_attention_dim=__A ,attention_bias=__A ,sample_size=__A ,num_vector_embeds=__A ,activation_fn=__A ,num_embeds_ada_norm=__A ,) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference _lowercase = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` _lowercase = [77, 257] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` _lowercase = [1, 0] def __UpperCAmelCase ( self : Union[str, Any] ,__A : Union[str, Any] ,__A : Tuple ,__A : Any=None ,__A : Optional[int]=None ,__A : Tuple=None ,__A : bool = True ,) -> Optional[Any]: _lowercase = hidden_states _lowercase = [] _lowercase = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens _lowercase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] _lowercase = self.transformer_index_for_condition[i] _lowercase = self.transformers[transformer_index]( __A ,encoder_hidden_states=__A ,timestep=__A ,cross_attention_kwargs=__A ,return_dict=__A ,)[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] _lowercase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) _lowercase = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__A )
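The blending step at the end of the forward pass above, isolated: each transformer's output has the input subtracted, the two residuals are interpolated with mix_ratio, and the input is added back. A minimal numeric sketch:

import torch

mix_ratio = 0.5
input_states = torch.randn(1, 4, 8)
encoded = [torch.randn(1, 4, 8) - input_states for _ in range(2)]  # residual-subtracted, as in forward
output = encoded[0] * mix_ratio + encoded[1] * (1 - mix_ratio) + input_states
assert output.shape == (1, 4, 8)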
67
'''simple docstring''' import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_bart import BartTokenizer __magic_name__ = logging.get_logger(__name__) __magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} # See all BART models at https://huggingface.co/models?filter=bart __magic_name__ = { 'vocab_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json', }, 'merges_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt', }, 'tokenizer_file': { 'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json', 'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json', 'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json', 'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json', 'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json', 'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json', }, } __magic_name__ = { 'facebook/bart-base': 1_024, 'facebook/bart-large': 1_024, 'facebook/bart-large-mnli': 1_024, 'facebook/bart-large-cnn': 1_024, 'facebook/bart-large-xsum': 1_024, 'yjernite/bart_eli5': 1_024, } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["""input_ids""", """attention_mask"""] a_ = BartTokenizer def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,): '''simple docstring''' super().__init__( _a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,) A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space: A_ : 
List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) ) A_ : Optional[int] = add_prefix_space A_ : int = pre_tok_class(**_a ) A_ : str = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` A_ : str = """post_processor""" A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a ) if tokenizer_component_instance: A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: A_ : Tuple = tuple(state["""sep"""] ) if "cls" in state: A_ : Tuple = tuple(state["""cls"""] ) A_ : List[str] = False if state.get("""add_prefix_space""" ,_a ) != add_prefix_space: A_ : Dict = add_prefix_space A_ : Any = True if state.get("""trim_offsets""" ,_a ) != trim_offsets: A_ : Union[str, Any] = trim_offsets A_ : List[Any] = True if changes_to_apply: A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) ) A_ : Tuple = component_class(**_a ) setattr(self.backend_tokenizer ,_a ,_a ) @property def _a ( self : List[str] ): '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def _a ( self : Union[str, Any] ,_a : Any ): '''simple docstring''' A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value A_ : List[Any] = value def _a ( self : str ,*_a : str ,**_a : Optional[int] ): '''simple docstring''' A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._batch_encode_plus(*_a ,**_a ) def _a ( self : str ,*_a : List[Any] ,**_a : str ): '''simple docstring''' A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a ) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' """to use it with pretokenized inputs.""" ) return super()._encode_plus(*_a ,**_a ) def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ): '''simple docstring''' A_ : str = self._tokenizer.model.save(_a ,name=_a ) return tuple(_a ) def _a ( self : str ,_a : Optional[int] ,_a : int=None ): '''simple docstring''' A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ): '''simple docstring''' A_ : Dict = [self.sep_token_id] A_ : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
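Usage sketch for the fast tokenizer above (upstream name BartTokenizerFast); the two _encode_plus overrides are why add_prefix_space=True is required for pre-tokenised input:

from transformers import BartTokenizerFast  # assumed upstream equivalent of the class above

tok = BartTokenizerFast.from_pretrained("facebook/bart-base", add_prefix_space=True)
enc = tok(["Hello", "world"], is_split_into_words=True)   # raises ValueError without add_prefix_space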
665
0
from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def lowercase__ ( A_: Dict ) -> str: """simple docstring""" if isinstance(A_ , collections.abc.Iterable ): return x return (x, x) @require_tf class _A : """simple docstring""" def _a ( self : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ) -> List[str]: pass def _a ( self : Optional[int] ) -> Tuple: pass def _a ( self : int ) -> str: pass def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[Any]: __UpperCAmelCase =VisionTextDualEncoderConfig.from_vision_text_configs(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =TFVisionTextDualEncoderModel(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=None , **__SCREAMING_SNAKE_CASE : List[Any] ) -> List[str]: __UpperCAmelCase , __UpperCAmelCase =self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self : Any , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str]=None , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]: __UpperCAmelCase , __UpperCAmelCase =self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase ={"""vision_model""": vision_model, """text_model""": text_model} __UpperCAmelCase =TFVisionTextDualEncoderModel.from_vision_text_pretrained(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(input_ids=__SCREAMING_SNAKE_CASE , 
pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) ) def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int]=None , **__SCREAMING_SNAKE_CASE : int ) -> Tuple: __UpperCAmelCase , __UpperCAmelCase =self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =TFVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model(input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =after_output[0].numpy() __UpperCAmelCase =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-5 ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any]=None , **__SCREAMING_SNAKE_CASE : Optional[Any] ) -> Optional[int]: __UpperCAmelCase , __UpperCAmelCase =self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model( input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =output.vision_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) __UpperCAmelCase =to_atuple(vision_model.config.image_size ) __UpperCAmelCase =to_atuple(vision_model.config.patch_size ) __UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __UpperCAmelCase =num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __UpperCAmelCase =output.text_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self : Tuple , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float ) -> Tuple: __UpperCAmelCase =np.abs((a - b) ).max() self.assertLessEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , f'''Difference between torch and flax is {diff} (>= {tol}).''' ) def _a ( self : List[Any] ) -> Optional[int]: __UpperCAmelCase =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**__SCREAMING_SNAKE_CASE ) def _a ( self : Union[str, Any] ) -> int: __UpperCAmelCase =self.prepare_config_and_inputs() 
self.check_model_from_pretrained_configs(**__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> Any: __UpperCAmelCase =self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**__SCREAMING_SNAKE_CASE ) def _a ( self : List[Any] ) -> Dict: __UpperCAmelCase =self.prepare_config_and_inputs() self.check_save_load(**__SCREAMING_SNAKE_CASE ) def _a ( self : Any ) -> Dict: __UpperCAmelCase =self.prepare_config_and_inputs() self.check_vision_text_output_attention(**__SCREAMING_SNAKE_CASE ) @slow def _a ( self : Any ) -> Optional[Any]: __UpperCAmelCase , __UpperCAmelCase =self.get_pretrained_model_and_inputs() __UpperCAmelCase =model_a(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =TFVisionTextDualEncoderModel.from_pretrained(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model_a(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =after_outputs[0].numpy() __UpperCAmelCase =np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__SCREAMING_SNAKE_CASE , 1e-5 ) @require_tf class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" def _a ( self : str ) -> List[Any]: __UpperCAmelCase =TFVisionTextDualEncoderModel.from_vision_text_pretrained( """hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-random-bert""" ) __UpperCAmelCase =13 __UpperCAmelCase =floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __UpperCAmelCase =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __UpperCAmelCase =random_attention_mask([batch_size, 4] ) __UpperCAmelCase ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]: __UpperCAmelCase =TFViTModel(__SCREAMING_SNAKE_CASE , name="""vision_model""" ) __UpperCAmelCase =TFBertModel(__SCREAMING_SNAKE_CASE , name="""text_model""" ) return vision_model, text_model def _a ( self : Union[str, Any] ) -> Tuple: __UpperCAmelCase =TFViTModelTester(self ) __UpperCAmelCase =TFBertModelTester(self ) __UpperCAmelCase =vit_model_tester.prepare_config_and_inputs() __UpperCAmelCase =bert_model_tester.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =vision_config_and_inputs ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) =text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" def _a ( self : Optional[Any] ) -> int: # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
__UpperCAmelCase =TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-deit-tf""" , """hf-internal-testing/tiny-random-roberta""" ) __UpperCAmelCase =13 __UpperCAmelCase =floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __UpperCAmelCase =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __UpperCAmelCase =random_attention_mask([batch_size, 4] ) __UpperCAmelCase ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : List[str] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict=None , **__SCREAMING_SNAKE_CASE : Any ) -> Dict: __UpperCAmelCase , __UpperCAmelCase =self.get_vision_text_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) __UpperCAmelCase =TFVisionTextDualEncoderModel(vision_model=__SCREAMING_SNAKE_CASE , text_model=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =model( input_ids=__SCREAMING_SNAKE_CASE , pixel_values=__SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , output_attentions=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =output.vision_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) __UpperCAmelCase =to_atuple(vision_model.config.image_size ) __UpperCAmelCase =to_atuple(vision_model.config.patch_size ) __UpperCAmelCase =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) __UpperCAmelCase =num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) __UpperCAmelCase =output.text_model_output.attentions self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def _a ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] ) -> Any: __UpperCAmelCase =TFDeiTModel(__SCREAMING_SNAKE_CASE , name="""vision_model""" ) __UpperCAmelCase =TFRobertaModel(__SCREAMING_SNAKE_CASE , name="""text_model""" ) return vision_model, text_model def _a ( self : Any ) -> Union[str, Any]: __UpperCAmelCase =TFDeiTModelTester(self ) __UpperCAmelCase =TFRobertaModelTester(self ) __UpperCAmelCase =vit_model_tester.prepare_config_and_inputs() __UpperCAmelCase =bert_model_tester.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase =vision_config_and_inputs ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) =text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class _A ( UpperCamelCase , unittest.TestCase ): """simple docstring""" def _a ( self : Any ) -> Optional[Any]: __UpperCAmelCase =TFVisionTextDualEncoderModel.from_vision_text_pretrained( """Rocketknight1/tiny-random-clip-tf""" , 
"""hf-internal-testing/tiny-random-bert""" ) __UpperCAmelCase =13 __UpperCAmelCase =floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) __UpperCAmelCase =ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) __UpperCAmelCase =random_attention_mask([batch_size, 4] ) __UpperCAmelCase ={"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def _a ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int ) -> Tuple: __UpperCAmelCase =TFCLIPVisionModel(__SCREAMING_SNAKE_CASE , name="""vision_model""" ) __UpperCAmelCase =TFBertModel(__SCREAMING_SNAKE_CASE , name="""text_model""" ) return vision_model, text_model def _a ( self : int ) -> Tuple: __UpperCAmelCase =TFCLIPVisionModelTester(self ) __UpperCAmelCase =TFBertModelTester(self ) __UpperCAmelCase =clip_model_tester.prepare_config_and_inputs() __UpperCAmelCase =bert_model_tester.prepare_config_and_inputs() __UpperCAmelCase , __UpperCAmelCase =vision_config_and_inputs ( ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ( __UpperCAmelCase ) , ) =text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class _A ( unittest.TestCase ): """simple docstring""" @slow def _a ( self : Optional[Any] ) -> Optional[int]: __UpperCAmelCase =TFVisionTextDualEncoderModel.from_pretrained( """clip-italian/clip-italian""" , logit_scale_init_value=1.0 , from_pt=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase =VisionTextDualEncoderProcessor.from_pretrained("""clip-italian/clip-italian""" ) __UpperCAmelCase =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) __UpperCAmelCase =processor( text=["""una foto di un gatto""", """una foto di un cane"""] , images=__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE , return_tensors="""np""" ) __UpperCAmelCase =model(**__SCREAMING_SNAKE_CASE ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) __UpperCAmelCase =np.array([[1.2_284_727, 0.3_104_122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , __SCREAMING_SNAKE_CASE , atol=1e-3 ) )
68
'''simple docstring''' import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file A_ : int = TapasConfig.from_json_file(lowerCamelCase) # set absolute/relative position embeddings parameter A_ : List[Any] = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": A_ : Optional[int] = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "WTQ": # run_task_main.py hparams A_ : Tuple = 4 A_ : Optional[Any] = True # hparam_utils.py hparams A_ : Any = 0.66_4694 A_ : str = 0.20_7951 A_ : Any = 0.12_1194 A_ : str = True A_ : Dict = True A_ : int = False A_ : int = 0.035_2513 A_ : Tuple = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams A_ : int = 4 A_ : Union[str, Any] = False # hparam_utils.py hparams A_ : Dict = 36.4519 A_ : List[Any] = 0.90_3421 A_ : Any = 222.088 A_ : Optional[Any] = True A_ : Optional[int] = True A_ : Optional[Any] = True A_ : Optional[int] = 0.76_3141 A_ : Any = TapasForQuestionAnswering(config=lowerCamelCase) elif task == "TABFACT": A_ : Any = TapasForSequenceClassification(config=lowerCamelCase) elif task == "MLM": A_ : List[Any] = TapasForMaskedLM(config=lowerCamelCase) elif task == "INTERMEDIATE_PRETRAINING": A_ : Union[str, Any] = TapasModel(config=lowerCamelCase) else: raise ValueError(F'Task {task} not supported.') print(F'Building PyTorch model from configuration: {config}') # Load weights from tf checkpoint load_tf_weights_in_tapas(lowerCamelCase , lowerCamelCase , lowerCamelCase) # Save pytorch-model (weights and configuration) print(F'Save PyTorch model to {pytorch_dump_path}') model.save_pretrained(lowerCamelCase) # Save tokenizer files print(F'Save tokenizer files to {pytorch_dump_path}') A_ : Optional[Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512) tokenizer.save_pretrained(lowerCamelCase) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell) if __name__ == "__main__": __magic_name__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.' ) parser.add_argument( '--reset_position_index_per_cell', default=False, action='store_true', help='Whether to use relative position embeddings or not. Defaults to True.', ) parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--tapas_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained TAPAS model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) __magic_name__ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
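For reference, a hedged sketch of invoking the converter directly; the function name comes from the script's own __main__ block, and every path below is a hypothetical placeholder. Note that the tokenizer step above slices the last 10 characters off the checkpoint path to locate vocab.txt, so it assumes the checkpoint file name ends in "model.ckpt".

# All paths are hypothetical placeholders; arguments follow the argparse setup above.
convert_tf_checkpoint_to_pytorch(
    "WTQ",                                 # task
    True,                                  # reset_position_index_per_cell
    "/checkpoints/wtq/model.ckpt",         # tf_checkpoint_path (must end in model.ckpt)
    "/checkpoints/wtq/tapas_config.json",  # tapas_config_file
    "/converted/tapas-wtq",                # pytorch_dump_path
)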
665
0
'''simple docstring''' import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : List[Any] = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , unittest.TestCase ): __SCREAMING_SNAKE_CASE = GPTSwaTokenizer __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = True __SCREAMING_SNAKE_CASE = False def A ( self : int ): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __snake_case = GPTSwaTokenizer(a_ , eos_token="<unk>" , bos_token="<unk>" , pad_token="<unk>" ) tokenizer.save_pretrained(self.tmpdirname ) def A ( self : str , a_ : List[Any] ): """simple docstring""" __snake_case = "This is a test" __snake_case = "This is a test" return input_text, output_text def A ( self : Union[str, Any] ): """simple docstring""" __snake_case = "<s>" __snake_case = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def A ( self : Tuple ): """simple docstring""" __snake_case = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<unk>" ) self.assertEqual(vocab_keys[1] , "<s>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(a_ ) , 2_000 ) def A ( self : Optional[int] ): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2_000 ) def A ( self : Dict ): """simple docstring""" __snake_case = GPTSwaTokenizer(a_ ) __snake_case = tokenizer.tokenize("This is a test" ) self.assertListEqual(a_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [465, 287, 265, 631, 842] ) __snake_case = tokenizer.tokenize("I was born in 92000, and this is falsé." ) # fmt: off self.assertListEqual( a_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] , ) # fmt: on __snake_case = tokenizer.convert_tokens_to_ids(a_ ) self.assertListEqual( a_ , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , ) __snake_case = tokenizer.convert_ids_to_tokens(a_ ) # fmt: off self.assertListEqual( a_ , ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."] ) # fmt: on def A ( self : List[str] ): """simple docstring""" __snake_case = GPTSwaTokenizer(a_ ) __snake_case = ["This is a test", "I was born in 92000, and this is falsé."] __snake_case = [ [465, 287, 265, 631, 842], [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], ] # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids for text, expected_ids in zip(a_ , a_ ): self.assertListEqual(tokenizer.encode_fast(a_ ) , a_ ) # Test that decode_fast returns the input text for text, token_ids in zip(a_ , a_ ): self.assertEqual(tokenizer.decode_fast(a_ ) , a_ ) @slow def A ( self : Any ): """simple docstring""" __snake_case = [ "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')", "Hey there, how are you doing this fine day?", "This is a text with a trailing spaces followed by a dot .", "Häj sväjs lillebrör! =)", "Det är inget fel på Mr. 
Cool", ] # fmt: off __snake_case = {"input_ids": [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=a_ , model_name="AI-Sweden/gpt-sw3-126m" , sequences=a_ , )
69
'''simple docstring''' from math import acos, sin from typing import List, Tuple, Union import numpy as np import torch from PIL import Image from ...models import AutoencoderKL, UNetaDConditionModel from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput from .mel import Mel class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' a_ = ["""vqvae"""] def __init__( self : Optional[Any] ,_a : AutoencoderKL ,_a : UNetaDConditionModel ,_a : Mel ,_a : Union[DDIMScheduler, DDPMScheduler] ,): '''simple docstring''' super().__init__() self.register_modules(unet=_a ,scheduler=_a ,mel=_a ,vqvae=_a ) def _a ( self : str ): '''simple docstring''' return 50 if isinstance(self.scheduler ,_a ) else 1000 @torch.no_grad() def __call__( self : Optional[int] ,_a : int = 1 ,_a : str = None ,_a : np.ndarray = None ,_a : int = 0 ,_a : int = 0 ,_a : int = None ,_a : torch.Generator = None ,_a : float = 0 ,_a : float = 0 ,_a : torch.Generator = None ,_a : float = 0 ,_a : torch.Tensor = None ,_a : torch.Tensor = None ,_a : int=True ,): '''simple docstring''' A_ : List[str] = steps or self.get_default_steps() self.scheduler.set_timesteps(_a ) A_ : Union[str, Any] = step_generator or generator # For backwards compatibility if type(self.unet.config.sample_size ) == int: A_ : Tuple = (self.unet.config.sample_size, self.unet.config.sample_size) if noise is None: A_ : int = randn_tensor( ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size[0], self.unet.config.sample_size[1], ) ,generator=_a ,device=self.device ,) A_ : List[Any] = noise A_ : str = None if audio_file is not None or raw_audio is not None: self.mel.load_audio(_a ,_a ) A_ : Any = self.mel.audio_slice_to_image(_a ) A_ : Union[str, Any] = np.frombuffer(input_image.tobytes() ,dtype="""uint8""" ).reshape( (input_image.height, input_image.width) ) A_ : Optional[Any] = (input_image / 255) * 2 - 1 A_ : Union[str, Any] = torch.tensor(input_image[np.newaxis, :, :] ,dtype=torch.float ).to(self.device ) if self.vqvae is not None: A_ : Union[str, Any] = self.vqvae.encode(torch.unsqueeze(_a ,0 ) ).latent_dist.sample( generator=_a )[0] A_ : List[str] = self.vqvae.config.scaling_factor * input_images if start_step > 0: A_ : Any = self.scheduler.add_noise(_a ,_a ,self.scheduler.timesteps[start_step - 1] ) A_ : Tuple = ( self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length ) A_ : Tuple = int(mask_start_secs * pixels_per_second ) A_ : str = int(mask_end_secs * pixels_per_second ) A_ : int = self.scheduler.add_noise(_a ,_a ,torch.tensor(self.scheduler.timesteps[start_step:] ) ) for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ): if isinstance(self.unet ,_a ): A_ : Optional[Any] = self.unet(_a ,_a ,_a )["""sample"""] else: A_ : List[Any] = self.unet(_a ,_a )["""sample"""] if isinstance(self.scheduler ,_a ): A_ : Dict = self.scheduler.step( model_output=_a ,timestep=_a ,sample=_a ,eta=_a ,generator=_a ,)["""prev_sample"""] else: A_ : Any = self.scheduler.step( model_output=_a ,timestep=_a ,sample=_a ,generator=_a ,)["""prev_sample"""] if mask is not None: if mask_start > 0: A_ : Tuple = mask[:, step, :, :mask_start] if mask_end > 0: A_ : List[str] = mask[:, step, :, -mask_end:] if self.vqvae is not None: # 0.18215 was scaling factor used in training to ensure unit variance A_ : str = 1 / self.vqvae.config.scaling_factor * images 
A_ : Union[str, Any] = self.vqvae.decode(_a )["""sample"""] A_ : int = (images / 2 + 0.5).clamp(0 ,1 ) A_ : str = images.cpu().permute(0 ,2 ,3 ,1 ).numpy() A_ : Optional[int] = (images * 255).round().astype("""uint8""" ) A_ : List[Any] = list( (Image.fromarray(_[:, :, 0] ) for _ in images) if images.shape[3] == 1 else (Image.fromarray(_a ,mode="""RGB""" ).convert("""L""" ) for _ in images) ) A_ : Tuple = [self.mel.image_to_audio(_a ) for _ in images] if not return_dict: return images, (self.mel.get_sample_rate(), audios) return BaseOutput(**AudioPipelineOutput(np.array(_a )[:, np.newaxis, :] ) ,**ImagePipelineOutput(_a ) ) @torch.no_grad() def _a ( self : Union[str, Any] ,_a : List[Image.Image] ,_a : int = 50 ): '''simple docstring''' assert isinstance(self.scheduler ,_a ) self.scheduler.set_timesteps(_a ) A_ : Optional[Any] = np.array( [np.frombuffer(image.tobytes() ,dtype="""uint8""" ).reshape((1, image.height, image.width) ) for image in images] ) A_ : List[str] = (sample / 255) * 2 - 1 A_ : Optional[int] = torch.Tensor(_a ).to(self.device ) for t in self.progress_bar(torch.flip(self.scheduler.timesteps ,(0,) ) ): A_ : List[str] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps A_ : Any = self.scheduler.alphas_cumprod[t] A_ : List[Any] = ( self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod ) A_ : str = 1 - alpha_prod_t A_ : List[str] = self.unet(_a ,_a )["""sample"""] A_ : str = (1 - alpha_prod_t_prev) ** 0.5 * model_output A_ : Union[str, Any] = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5) A_ : Optional[int] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output return sample @staticmethod def _a ( _a : torch.Tensor ,_a : torch.Tensor ,_a : float ): '''simple docstring''' A_ : List[Any] = acos(torch.dot(torch.flatten(_a ) ,torch.flatten(_a ) ) / torch.norm(_a ) / torch.norm(_a ) ) return sin((1 - alpha) * theta ) * xa / sin(_a ) + sin(alpha * theta ) * xa / sin(_a )
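The static method closing the pipeline implements spherical linear interpolation (slerp) between two latent tensors. A self-contained restatement of the same computation, with descriptive names in place of the mangled ones above:

from math import acos, sin

import torch


def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
    """Spherical linear interpolation between two tensors of the same shape.

    alpha = 0 returns x0, alpha = 1 returns x1; intermediate values follow the
    great circle between the two (flattened) vectors.
    """
    theta = acos(
        torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1)
    )
    return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)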
665
0
import os import unittest from transformers import BatchEncoding from transformers.models.bert.tokenization_bert import ( BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer from transformers.testing_utils import require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin class A( UpperCamelCase , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ProphetNetTokenizer UpperCamelCase = False def a__ ( self : List[Any] ) -> List[str]: """simple docstring""" super().setUp() lowerCamelCase_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def a__ ( self : Union[str, Any] , A_ : str ) -> List[Any]: """simple docstring""" lowerCamelCase_ = 'UNwant\u00E9d,running' lowerCamelCase_ = 'unwanted, running' return input_text, output_text def a__ ( self : Optional[int] ) -> int: """simple docstring""" lowerCamelCase_ = self.tokenizer_class(self.vocab_file ) lowerCamelCase_ = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(A_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] ) def a__ ( self : List[str] ) -> Optional[Any]: """simple docstring""" lowerCamelCase_ = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] ) def a__ ( self : List[Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] ) def a__ ( self : List[str] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Union[str, Any] ) -> Tuple: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] ) def a__ ( self : Tuple ) -> Any: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? 
' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Union[str, Any] ) -> Optional[int]: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def a__ ( self : Union[str, Any] ) -> int: """simple docstring""" lowerCamelCase_ = BasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def a__ ( self : List[str] ) -> Dict: """simple docstring""" lowerCamelCase_ = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] lowerCamelCase_ = {} for i, token in enumerate(A_ ): lowerCamelCase_ = i lowerCamelCase_ = WordpieceTokenizer(vocab=A_ , unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) , [] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] ) @require_torch def a__ ( self : Union[str, Any] ) -> Dict: """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) lowerCamelCase_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] lowerCamelCase_ = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102] lowerCamelCase_ = tokenizer(A_ , padding=A_ , return_tensors='pt' ) self.assertIsInstance(A_ , A_ ) lowerCamelCase_ = list(batch.input_ids.numpy()[0] ) self.assertListEqual(A_ , A_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) def a__ ( self : int ) -> Union[str, Any]: """simple docstring""" self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def a__ ( self : List[str] ) -> Union[str, Any]: """simple docstring""" self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def a__ ( self : int ) -> str: """simple docstring""" self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) @slow def a__ ( self : int ) -> int: """simple docstring""" lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased' ) lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ ) lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(A_ , A_ ) assert encoded_sentence == text + [102] assert encoded_pair == text + [102] + text_a + [102]
70
'''simple docstring''' import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __magic_name__ = 16 __magic_name__ = 32 def lowerCamelCase ( lowerCamelCase : Accelerator , lowerCamelCase : int = 16): A_ : Any = AutoTokenizer.from_pretrained("""bert-base-cased""") A_ : str = load_dataset("""glue""" , """mrpc""") def tokenize_function(lowerCamelCase : Dict): # max_length=None => use the model max length (it's actually the default) A_ : List[str] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCamelCase , max_length=lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): A_ : Tuple = datasets.map( lowerCamelCase , batched=lowerCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library A_ : List[str] = tokenized_datasets.rename_column("""label""" , """labels""") def collate_fn(lowerCamelCase : Tuple): # On TPU it's best to pad everything to the same length or training will be very slow. A_ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": A_ : List[Any] = 16 elif accelerator.mixed_precision != "no": A_ : Any = 8 else: A_ : Tuple = None return tokenizer.pad( lowerCamelCase , padding="""longest""" , max_length=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
A_ : int = DataLoader( tokenized_datasets["""train"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=lowerCamelCase) A_ : str = DataLoader( tokenized_datasets["""validation"""] , shuffle=lowerCamelCase , collate_fn=lowerCamelCase , batch_size=lowerCamelCase , drop_last=(accelerator.mixed_precision == """fp8""") , ) return train_dataloader, eval_dataloader def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Dict): # Initialize accelerator A_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs A_ : List[Any] = config["""lr"""] A_ : List[Any] = int(config["""num_epochs"""]) A_ : int = int(config["""seed"""]) A_ : Dict = int(config["""batch_size"""]) A_ : Union[str, Any] = evaluate.load("""glue""" , """mrpc""") # If the batch size is too big we use gradient accumulation A_ : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: A_ : Any = batch_size // MAX_GPU_BATCH_SIZE A_ : Union[str, Any] = MAX_GPU_BATCH_SIZE set_seed(lowerCamelCase) A_ , A_ : List[str] = get_dataloaders(lowerCamelCase , lowerCamelCase) # Instantiate the model (we build the model here so that the seed also control new weights initialization) A_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCamelCase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). A_ : str = model.to(accelerator.device) # Instantiate optimizer A_ : str = AdamW(params=model.parameters() , lr=lowerCamelCase) # Instantiate scheduler A_ : Tuple = get_linear_schedule_with_warmup( optimizer=lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCamelCase) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. A_ , A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase) # Now we train the model for epoch in range(lowerCamelCase): model.train() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) A_ : Optional[int] = model(**lowerCamelCase) A_ : List[Any] = outputs.loss A_ : Tuple = loss / gradient_accumulation_steps accelerator.backward(lowerCamelCase) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) with torch.no_grad(): A_ : Union[str, Any] = model(**lowerCamelCase) A_ : Any = outputs.logits.argmax(dim=-1) A_ , A_ : Tuple = accelerator.gather_for_metrics((predictions, batch["""labels"""])) metric.add_batch( predictions=lowerCamelCase , references=lowerCamelCase , ) A_ : int = metric.compute() # Use accelerator.print to print only on the main process. 
accelerator.print(F'epoch {epoch}:' , lowerCamelCase) def lowerCamelCase ( ): A_ : Optional[int] = argparse.ArgumentParser(description="""Simple example of training script.""") parser.add_argument( """--mixed_precision""" , type=lowerCamelCase , default=lowerCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""") A_ : Dict = parser.parse_args() A_ : Dict = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(lowerCamelCase , lowerCamelCase) if __name__ == "__main__": main()
665
0
'''simple docstring''' import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def a__ ( _SCREAMING_SNAKE_CASE : int ) -> Optional[Any]: """simple docstring""" UpperCAmelCase_ : int = filter(lambda _SCREAMING_SNAKE_CASE : p.requires_grad , model.parameters() ) UpperCAmelCase_ : Union[str, Any] = sum([np.prod(p.size() ) for p in model_parameters] ) return params _lowerCamelCase = logging.getLogger(__name__) def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ) -> str: """simple docstring""" if metric == "rouge2": UpperCAmelCase_ : int = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": UpperCAmelCase_ : Dict = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": UpperCAmelCase_ : List[Any] = "{val_avg_em:.4f}-{step_count}" else: raise NotImplementedError( F'''seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this''' " function." ) UpperCAmelCase_ : Tuple = ModelCheckpoint( dirpath=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , monitor=F'''val_{metric}''' , mode="max" , save_top_k=3 , every_n_epochs=1 , ) return checkpoint_callback def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]: """simple docstring""" return EarlyStopping( monitor=F'''val_{metric}''' , mode="min" if "loss" in metric else "max" , patience=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , ) class _snake_case (pl.Callback): def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): UpperCAmelCase_ : Union[str, Any] = {f'''lr_group_{i}''': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )} pl_module.logger.log_metrics(_snake_case ) @rank_zero_only def UpperCamelCase__ ( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case=True ): logger.info(f'''***** {type_path} results at step {trainer.global_step:05d} *****''' ) UpperCAmelCase_ : Optional[int] = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} ) # Log results UpperCAmelCase_ : Union[str, Any] = Path(pl_module.hparams.output_dir ) if type_path == "test": UpperCAmelCase_ : Optional[Any] = od / "test_results.txt" UpperCAmelCase_ : List[str] = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
UpperCAmelCase_ : List[Any] = od / f'''{type_path}_results/{trainer.global_step:05d}.txt''' UpperCAmelCase_ : List[str] = od / f'''{type_path}_generations/{trainer.global_step:05d}.txt''' results_file.parent.mkdir(exist_ok=_snake_case ) generations_file.parent.mkdir(exist_ok=_snake_case ) with open(_snake_case ,"a+" ) as writer: for key in sorted(_snake_case ): if key in ["log", "progress_bar", "preds"]: continue UpperCAmelCase_ : Union[str, Any] = metrics[key] if isinstance(_snake_case ,torch.Tensor ): UpperCAmelCase_ : Optional[int] = val.item() UpperCAmelCase_ : Dict = f'''{key}: {val:.6f}\n''' writer.write(_snake_case ) if not save_generations: return if "preds" in metrics: UpperCAmelCase_ : Any = "\n".join(metrics["preds"] ) generations_file.open("w+" ).write(_snake_case ) @rank_zero_only def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): try: UpperCAmelCase_ : List[Any] = pl_module.model.model.num_parameters() except AttributeError: UpperCAmelCase_ : Any = pl_module.model.num_parameters() UpperCAmelCase_ : Union[str, Any] = count_trainable_parameters(_snake_case ) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1E6, "grad_mp": n_trainable_pars / 1E6} ) @rank_zero_only def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): save_json(pl_module.metrics ,pl_module.metrics_save_path ) return self._write_logs(_snake_case ,_snake_case ,"test" ) @rank_zero_only def UpperCamelCase__ ( self ,_snake_case ,_snake_case ): save_json(pl_module.metrics ,pl_module.metrics_save_path ) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
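As a rough sketch (not taken from this file), the pieces above are typically attached to a trainer along these lines; `model` is a placeholder for a configured LightningModule, and the monitored metric follows the val_<metric> convention used above:

import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

# Hypothetical wiring; mirrors what the factory helpers above construct for rouge2.
checkpoint = ModelCheckpoint(
    dirpath="checkpoints",  # placeholder output directory
    filename="{val_avg_rouge2:.4f}-{step_count}",
    monitor="val_rouge2",
    mode="max",
    save_top_k=3,
)
early_stopping = EarlyStopping(monitor="val_rouge2", mode="max", patience=3, verbose=True)

trainer = pl.Trainer(callbacks=[checkpoint, early_stopping])
trainer.fit(model)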
71
import functools


def mincost_tickets(days: list[int], costs: list[int]) -> int:
    """Minimum cost to cover all travel days with 1-day, 7-day and 30-day passes."""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")

    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")

    if len(days) == 0:
        return 0

    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")

    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")

    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0

        if index not in days_set:
            return dynamic_programming(index + 1)

        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
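A quick sanity check for the function above (`mincost_tickets` is a restored, descriptive name for the mangled original):

# Travel days 1, 4, 6, 7, 8, 20 with pass costs [2, 7, 15]:
# buy 1-day passes on days 1 and 20, and a 7-day pass covering days 4-8.
assert mincost_tickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11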
665
0
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
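A small sanity check of the aliasing properties defined above, assuming the restored class name:

config = UMT5Config()
# The T5-style names and the generic names refer to the same values.
assert config.hidden_size == config.d_model == 512
assert config.num_attention_heads == config.num_heads == 6
assert config.dense_act_fn == "gelu_new"  # the default "gated-gelu" maps to the gelu_new kernel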
72
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[int],
    iterations: int,
) -> list[float]:
    """Solve A.x = b iteratively, starting from init_val, for the given number of iterations."""
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate((coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            new_val.append((temp + val) / denom)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    """Raise if any diagonal entry fails to dominate the sum of the other entries in its row."""
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
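A worked example on a strictly diagonally dominant 2x2 system, using the restored names above:

import numpy as np

# Solve 4x + y = 1, x + 3y = 2; the exact solution is x = 1/11, y = 7/11.
coefficients = np.array([[4.0, 1.0], [1.0, 3.0]])
constants = np.array([[1.0], [2.0]])

solution = jacobi_iteration_method(coefficients, constants, init_val=[0, 0], iterations=25)
print(solution)  # approximately [0.0909, 0.6364]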
665
0
import os import re import shutil from argparse import ArgumentParser, Namespace from datasets.commands import BaseDatasetsCLICommand from datasets.utils.logging import get_logger a_ : Union[str, Any] = '<<<<<<< This should probably be modified because it mentions: ' a_ : int = '=======\n>>>>>>>\n' a_ : Tuple = [ 'TextEncoderConfig', 'ByteTextEncoder', 'SubwordTextEncoder', 'encoder_config', 'maybe_build_from_corpus', 'manual_dir', ] a_ : Optional[int] = [ # (pattern, replacement) # Order is important here for some replacements (R'tfds\.core', R'datasets'), (R'tf\.io\.gfile\.GFile', R'open'), (R'tf\.([\w\d]+)', R'datasets.Value(\'\1\')'), (R'tfds\.features\.Text\(\)', R'datasets.Value(\'string\')'), (R'tfds\.features\.Text\(', R'datasets.Value(\'string\'),'), (R'features\s*=\s*tfds.features.FeaturesDict\(', R'features=datasets.Features('), (R'tfds\.features\.FeaturesDict\(', R'dict('), (R'The TensorFlow Datasets Authors', R'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'), (R'tfds\.', R'datasets.'), (R'dl_manager\.manual_dir', R'self.config.data_dir'), (R'self\.builder_config', R'self.config'), ] def lowerCamelCase__ (_UpperCAmelCase): return ConvertCommand(args.tfds_path , args.datasets_directory) class _snake_case ( A__ ): @staticmethod def SCREAMING_SNAKE_CASE__ ( a) -> Union[str, Any]: SCREAMING_SNAKE_CASE = parser.add_parser( 'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , ) train_parser.add_argument( '--tfds_path' , type=a , required=a , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , ) train_parser.add_argument( '--datasets_directory' , type=a , required=a , help='Path to the HuggingFace Datasets folder.') train_parser.set_defaults(func=a) def __init__( self , a , a , *a) -> Union[str, Any]: SCREAMING_SNAKE_CASE = get_logger('datasets-cli/converting') SCREAMING_SNAKE_CASE = tfds_path SCREAMING_SNAKE_CASE = datasets_directory def SCREAMING_SNAKE_CASE__ ( self) -> Optional[Any]: if os.path.isdir(self._tfds_path): SCREAMING_SNAKE_CASE = os.path.abspath(self._tfds_path) elif os.path.isfile(self._tfds_path): SCREAMING_SNAKE_CASE = os.path.dirname(self._tfds_path) else: raise ValueError('--tfds_path is neither a directory nor a file. 
Please check path.') SCREAMING_SNAKE_CASE = os.path.abspath(self._datasets_directory) self._logger.info(f'''Converting datasets from {abs_tfds_path} to {abs_datasets_path}''') SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = {} if os.path.isdir(self._tfds_path): SCREAMING_SNAKE_CASE = os.listdir(a) else: SCREAMING_SNAKE_CASE = [os.path.basename(self._tfds_path)] for f_name in file_names: self._logger.info(f'''Looking at file {f_name}''') SCREAMING_SNAKE_CASE = os.path.join(a , a) SCREAMING_SNAKE_CASE = os.path.join(a , a) if not os.path.isfile(a) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name: self._logger.info('Skipping file') continue with open(a , encoding='utf-8') as f: SCREAMING_SNAKE_CASE = f.readlines() SCREAMING_SNAKE_CASE = [] SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = False SCREAMING_SNAKE_CASE = [] for line in lines: SCREAMING_SNAKE_CASE = line # Convert imports if "import tensorflow.compat.v2 as tf" in out_line: continue elif "@tfds.core" in out_line: continue elif "builder=self" in out_line: continue elif "import tensorflow_datasets.public_api as tfds" in out_line: SCREAMING_SNAKE_CASE = 'import datasets\n' elif "import tensorflow" in out_line: # order is important here SCREAMING_SNAKE_CASE = '' continue elif "from absl import logging" in out_line: SCREAMING_SNAKE_CASE = 'from datasets import logging\n' elif "getLogger" in out_line: SCREAMING_SNAKE_CASE = out_line.replace('getLogger' , 'get_logger') elif any(expression in out_line for expression in TO_HIGHLIGHT): SCREAMING_SNAKE_CASE = True SCREAMING_SNAKE_CASE = list(filter(lambda a: e in out_line , a)) out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(a) + '\n') out_lines.append(a) out_lines.append(a) continue else: for pattern, replacement in TO_CONVERT: SCREAMING_SNAKE_CASE = re.sub(a , a , a) # Take care of saving utilities (to later move them together with main script) if "tensorflow_datasets" in out_line: SCREAMING_SNAKE_CASE = re.match(R'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , a) tfds_imports.extend(imp.strip() for imp in match.group(1).split(',')) SCREAMING_SNAKE_CASE = 'from . import ' + match.group(1) # Check we have not forget anything if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line: raise ValueError(f'''Error converting {out_line.strip()}''') if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line: SCREAMING_SNAKE_CASE = True out_lines.append(a) if is_builder or "wmt" in f_name: # We create a new directory for each dataset SCREAMING_SNAKE_CASE = f_name.replace('.py' , '') SCREAMING_SNAKE_CASE = os.path.join(a , a) SCREAMING_SNAKE_CASE = os.path.join(a , a) os.makedirs(a , exist_ok=a) self._logger.info(f'''Adding directory {output_dir}''') imports_to_builder_map.update({imp: output_dir for imp in tfds_imports}) else: # Utilities will be moved at the end utils_files.append(a) if needs_manual_update: with_manual_update.append(a) with open(a , 'w' , encoding='utf-8') as f: f.writelines(a) self._logger.info(f'''Converted in {output_file}''') for utils_file in utils_files: try: SCREAMING_SNAKE_CASE = os.path.basename(a) SCREAMING_SNAKE_CASE = imports_to_builder_map[f_name.replace('.py' , '')] self._logger.info(f'''Moving {dest_folder} to {utils_file}''') shutil.copy(a , a) except KeyError: self._logger.error(f'''Cannot find destination folder for {utils_file}. 
Please copy manually.''') if with_manual_update: for file_path in with_manual_update: self._logger.warning( f'''You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.''')
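The rewrite is driven by the ordered TO_CONVERT (pattern, replacement) pairs defined at the top of this file; a minimal illustration of a few of them applied to one sample line:

import re

line = "features=tfds.features.FeaturesDict({'text': tfds.features.Text()})"
for pattern, replacement in [
    (r"tfds\.features\.Text\(\)", r"datasets.Value('string')"),
    (r"features\s*=\s*tfds.features.FeaturesDict\(", r"features=datasets.Features("),
    (r"tfds\.", r"datasets."),
]:
    line = re.sub(pattern, replacement, line)

print(line)  # features=datasets.Features({'text': datasets.Value('string')})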
73
def abbr(a: str, b: str) -> bool:
    """Return True if `a` can be turned into `b` by capitalizing some of its
    lowercase letters and deleting the remaining lowercase letters."""
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
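Two quick checks for the function above (`abbr` is a restored, descriptive name for the mangled original):

# "daBcd" -> capitalize 'a' and 'c', delete the remaining lowercase 'd's -> "ABC"
assert abbr("daBcd", "ABC") is True
assert abbr("dBcd", "ABC") is False  # no way to produce the leading 'A'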
665
0
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
74
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")

        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")

        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: weight-0 edges go to the front of the deque, weight-1 edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
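A minimal usage sketch for the 0-1 BFS above; the graph shape and weights here are assumed purely for illustration and are not part of the original module:

# Hypothetical demo: both routes below cost 1, since weight-0 edges are free.
g = AdjacencyList(4)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 3, 1)
g.add_edge(3, 2, 0)
assert g.get_shortest_path(0, 2) == 1  # 0 -> 1 -> 2 or 0 -> 3 -> 2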
665
0
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """Array-backed min-heap keyed on Node.val, with an index map for O(1) node lookups."""

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# my_min_heap.insert(a)
# my_min_heap.insert(b)
# my_min_heap.insert(x)
# my_min_heap.insert(r)
# my_min_heap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
75
def solution(max_perimeter: int = 10**9) -> int:
    """
    Sum every perimeter generated by the recurrence below that does not
    exceed max_perimeter.
    """
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0

    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
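A quick sanity check, traced by hand: starting from prev_value=1, value=2, the loop generates the perimeters 16, 50, 196, ... (the perimeters of the almost-equilateral triangles (5, 5, 6), (17, 17, 16), (65, 65, 66)):

# Only 16 and 50 do not exceed 100, so the expected sum is 66.
assert solution(100) == 66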
665
0
"""simple docstring""" import numpy as np import skfuzzy as fuzz if __name__ == "__main__": # Create universe of discourse in Python using linspace () a_ = np.linspace(start=0, stop=7_5, num=7_5, endpoint=True, retstep=False) # Create two fuzzy sets by defining any membership function # (trapmf(), gbellmf(), gaussmf(), etc). a_ = [0, 2_5, 5_0] a_ = [2_5, 5_0, 7_5] a_ = fuzz.membership.trimf(X, abca) a_ = fuzz.membership.trimf(X, abca) # Compute the different operations using inbuilt functions. a_ = np.ones(7_5) a_ = np.zeros((7_5,)) # 1. Union = max(µA(x), µB(x)) a_ = fuzz.fuzzy_or(X, young, X, middle_aged)[1] # 2. Intersection = min(µA(x), µB(x)) a_ = fuzz.fuzzy_and(X, young, X, middle_aged)[1] # 3. Complement (A) = (1- min(µA(x)) a_ = fuzz.fuzzy_not(young) # 4. Difference (A/B) = min(µA(x),(1- µB(x))) a_ = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1] # 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))] a_ = young + middle_aged - (young * middle_aged) # 6. Algebraic Product = (µA(x) * µB(x)) a_ = young * middle_aged # 7. Bounded Sum = min[1,(µA(x), µB(x))] a_ = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1] # 8. Bounded difference = min[0,(µA(x), µB(x))] a_ = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1] # max-min composition # max-product composition # Plot each set A, set B and each operation result using plot() and subplot(). from matplotlib import pyplot as plt plt.figure() plt.subplot(4, 3, 1) plt.plot(X, young) plt.title('Young') plt.grid(True) plt.subplot(4, 3, 2) plt.plot(X, middle_aged) plt.title('Middle aged') plt.grid(True) plt.subplot(4, 3, 3) plt.plot(X, union) plt.title('union') plt.grid(True) plt.subplot(4, 3, 4) plt.plot(X, intersection) plt.title('intersection') plt.grid(True) plt.subplot(4, 3, 5) plt.plot(X, complement_a) plt.title('complement_a') plt.grid(True) plt.subplot(4, 3, 6) plt.plot(X, difference) plt.title('difference a/b') plt.grid(True) plt.subplot(4, 3, 7) plt.plot(X, alg_sum) plt.title('alg_sum') plt.grid(True) plt.subplot(4, 3, 8) plt.plot(X, alg_product) plt.title('alg_product') plt.grid(True) plt.subplot(4, 3, 9) plt.plot(X, bdd_sum) plt.title('bdd_sum') plt.grid(True) plt.subplot(4, 3, 1_0) plt.plot(X, bdd_difference) plt.title('bdd_difference') plt.grid(True) plt.subplots_adjust(hspace=0.5) plt.show()
76
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from argparse import ArgumentParser

from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser


def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
665
0
"""simple docstring""" from numpy import exp, pi, sqrt def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 0.0 , UpperCamelCase = 1.0 ) -> int: """simple docstring""" return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) ) if __name__ == "__main__": import doctest doctest.testmod()
77
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
665
0
import math


def is_prime(number: int) -> bool:
    """Return True if the given non-negative integer is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Find a prime at or above factor * value; if that product is itself prime, return the next prime after it."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
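A few illustrative calls; the inputs are chosen arbitrarily for this sketch:

assert is_prime(29) is True
assert is_prime(28) is False
# 2 * 2 = 4 is composite, so next_prime walks up to the nearest prime, 5.
assert next_prime(2, factor=2) == 5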
78
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_yolos"] = ["YolosFeatureExtractor"]
    _import_structure["image_processing_yolos"] = ["YolosImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_yolos"] = [
        "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "YolosForObjectDetection",
        "YolosModel",
        "YolosPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_yolos import YolosFeatureExtractor
        from .image_processing_yolos import YolosImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_yolos import (
            YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST,
            YolosForObjectDetection,
            YolosModel,
            YolosPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
665
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-small-librispeech-asr": (
        "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}


class Speech2TextConfig(PretrainedConfig):
    model_type = "speech_to_text"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        encoder_layers=12,
        encoder_ffn_dim=2048,
        encoder_attention_heads=4,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_source_positions=6000,
        max_target_positions=1024,
        num_conv_layers=2,
        conv_kernel_sizes=(5, 5),
        conv_channels=1024,
        input_feat_per_channel=80,
        input_channels=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes)
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes) != self.num_conv_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` "
                f"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes)}`, "
                f"`config.num_conv_layers = {self.num_conv_layers}`."
            )

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
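A brief usage sketch; the override values are assumptions chosen only to exercise the consistency check in __init__:

config = Speech2TextConfig()
print(config.d_model)  # 256 by default

# len(conv_kernel_sizes) must equal num_conv_layers, or __init__ raises ValueError.
config = Speech2TextConfig(num_conv_layers=1, conv_kernel_sizes=(5,))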
79
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
665
0