code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
'''simple docstring''' from typing import Dict from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available from transformers.testing_utils import ( TestCasePlus, execute_subprocess_async, get_torch_dist_unique_port, require_torch_multi_gpu, require_torch_neuroncore, ) from transformers.training_args import ParallelMode from transformers.utils import logging lowerCAmelCase : int = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from torch.utils.data import Dataset from transformers import Trainer class SCREAMING_SNAKE_CASE__ ( lowercase_): def __init__( self , A_ = 101 )-> Optional[Any]: '''simple docstring''' UpperCamelCase = length def __len__( self )-> List[str]: '''simple docstring''' return self.length def __getitem__( self , A_ )-> List[str]: '''simple docstring''' return i class SCREAMING_SNAKE_CASE__ : def __call__( self , A_ )-> str: '''simple docstring''' return {"input_ids": torch.tensor(UpperCamelCase__ ), "labels": torch.tensor(UpperCamelCase__ )} class SCREAMING_SNAKE_CASE__ ( nn.Module): def __init__( self )-> str: '''simple docstring''' super().__init__() # Add some (unused) params otherwise DDP will complain. 
UpperCamelCase = nn.Linear(120 , 80 ) def UpperCAmelCase_ ( self , A_ , A_=None )-> Dict: '''simple docstring''' if labels is not None: return torch.tensor(0.0 , device=input_ids.device ), input_ids else: return input_ids class SCREAMING_SNAKE_CASE__ ( lowercase_): @require_torch_neuroncore def UpperCAmelCase_ ( self )-> str: '''simple docstring''' UpperCamelCase = F'''--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() UpperCamelCase = self.get_auto_remove_tmp_dir() UpperCamelCase = F'''--output_dir {output_dir}'''.split() UpperCamelCase = ['torchrun'] + distributed_args + args execute_subprocess_async(UpperCamelCase__ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call class SCREAMING_SNAKE_CASE__ ( lowercase_): @require_torch_multi_gpu def UpperCAmelCase_ ( self )-> Tuple: '''simple docstring''' UpperCamelCase = F'''--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split() UpperCamelCase = self.get_auto_remove_tmp_dir() UpperCamelCase = F'''--output_dir {output_dir}'''.split() UpperCamelCase = ['torchrun'] + distributed_args + args execute_subprocess_async(UpperCamelCase__ , env=self.get_env() ) # successful return here == success - any errors would have caused an error in the sub-call if __name__ == "__main__": # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs: # # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py lowerCAmelCase : str = HfArgumentParser((TrainingArguments,)) lowerCAmelCase : str = parser.parse_args_into_dataclasses()[0] logger.warning( f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """ f"""distributed training: {training_args.parallel_mode 
!= ParallelMode.NOT_DISTRIBUTED}""" ) # Essentially, what we want to verify in the distributed case is that we get all samples back, # in the right order. (this is crucial for prediction for instance) for dataset_length in [1_01, 40, 7]: lowerCAmelCase : Tuple = DummyDataset(dataset_length) def A_( A : EvalPrediction): UpperCamelCase = list(range(len(A))) UpperCamelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential if not success and training_args.local_rank == 0: logger.warning( 'Predictions and/or labels do not match expected results:\n - predictions: ' f'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''') return {"success": success} lowerCAmelCase : str = Trainer( model=DummyModel(), args=training_args, data_collator=DummyDataCollator(), eval_dataset=dataset, compute_metrics=compute_metrics, ) lowerCAmelCase : Optional[Any] = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) lowerCAmelCase : Dict = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) lowerCAmelCase : List[str] = 2 lowerCAmelCase : str = trainer.evaluate() logger.info(metrics) if metrics["eval_success"] is not True: logger.error(metrics) exit(1) lowerCAmelCase : Optional[int] = trainer.predict(dataset) logger.info(p.metrics) if p.metrics["test_success"] is not True: logger.error(p.metrics) exit(1) lowerCAmelCase : int = None
3
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : Optional[Any] = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements what should be identified in the segmentation mask. 
The tool returns the mask.''' ) _lowercase : Dict = '''CIDAS/clipseg-rd64-refined''' _lowercase : List[Any] = '''image_segmenter''' _lowercase : Tuple = CLIPSegForImageSegmentation _lowercase : str = ['''image''', '''text'''] _lowercase : Dict = ['''image'''] def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any]): '''simple docstring''' requires_backends(self , ["""vision"""]) super().__init__(*UpperCamelCase__ , **UpperCamelCase__) def __magic_name__ ( self : str , UpperCamelCase__ : "Image" , UpperCamelCase__ : str): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=UpperCamelCase__ , return_tensors="""pt""") def __magic_name__ ( self : Any , UpperCamelCase__ : Optional[Any]): '''simple docstring''' with torch.no_grad(): snake_case__ = self.model(**UpperCamelCase__).logits return logits def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = outputs.cpu().detach().numpy() snake_case__ = 0 snake_case__ = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta))
654
0
"""simple docstring""" from ..utils import DummyObject, requires_backends class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Any ,*a__ : Union[str, Any] ,**a__ : Union[str, Any]) -> List[str]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : Union[str, Any] ,**a__ : str) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Optional[int] ,**a__ : Optional[int]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Dict ,*a__ : Optional[int] ,**a__ : Optional[Any]) -> List[str]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : List[Any] ,**a__ : int) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : Tuple ,**a__ : Optional[Any]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Dict ,*a__ : List[Any] ,**a__ : Optional[Any]) -> Dict: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : str ,**a__ : Tuple) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ,*a__ : List[Any] ,**a__ : Any) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[Any] ,*a__ : Optional[Any] ,**a__ : Any) -> Optional[int]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ,*a__ : Optional[Any] ,**a__ : 
Union[str, Any]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : int ,**a__ : Tuple) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Tuple ,*a__ : int ,**a__ : Union[str, Any]) -> Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Dict ,**a__ : str) -> Dict: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ,*a__ : Dict ,**a__ : Union[str, Any]) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Any ,*a__ : Optional[Any] ,**a__ : Optional[Any]) -> Dict: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : Any ,**a__ : List[str]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : str ,**a__ : Optional[int]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Any ,*a__ : int ,**a__ : Union[str, Any]) -> Dict: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : Optional[Any] ,**a__ : Union[str, Any]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : Dict ,**a__ : Union[str, Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Any ,*a__ : Union[str, Any] ,**a__ : Any) -> List[str]: 
"""simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : Dict ,**a__ : Optional[Any]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : Tuple ,**a__ : Tuple) -> str: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Optional[Any] ,*a__ : Optional[Any] ,**a__ : Tuple) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : List[str] ,**a__ : Optional[Any]) -> Dict: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ,*a__ : Optional[int] ,**a__ : Optional[Any]) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Optional[Any] ,*a__ : Dict ,**a__ : Optional[Any]) -> int: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[Any] ,*a__ : Optional[Any] ,**a__ : Optional[Any]) -> Dict: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : Tuple ,**a__ : Optional[int]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : str ,*a__ : Any ,**a__ : Optional[int]) -> List[str]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : str ,**a__ : int) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : int ,**a__ : Dict) -> Any: """simple docstring""" requires_backends(cls ,['''torch''']) def 
UpperCAmelCase ( *snake_case : str , **snake_case : List[Any] ): requires_backends(snake_case , ['''torch'''] ) def UpperCAmelCase ( *snake_case : Tuple , **snake_case : Tuple ): requires_backends(snake_case , ['''torch'''] ) def UpperCAmelCase ( *snake_case : Optional[Any] , **snake_case : Any ): requires_backends(snake_case , ['''torch'''] ) def UpperCAmelCase ( *snake_case : Optional[Any] , **snake_case : Union[str, Any] ): requires_backends(snake_case , ['''torch'''] ) def UpperCAmelCase ( *snake_case : Any , **snake_case : Optional[int] ): requires_backends(snake_case , ['''torch'''] ) def UpperCAmelCase ( *snake_case : Optional[Any] , **snake_case : Any ): requires_backends(snake_case , ['''torch'''] ) def UpperCAmelCase ( *snake_case : Optional[Any] , **snake_case : int ): requires_backends(snake_case , ['''torch'''] ) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[Any] ,*a__ : Union[str, Any] ,**a__ : Union[str, Any]) -> Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ,*a__ : Optional[Any] ,**a__ : str) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : Dict ,**a__ : Dict) -> Dict: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Optional[int] ,*a__ : Optional[Any] ,**a__ : Optional[Any]) -> Optional[int]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : Union[str, Any] ,**a__ : Optional[int]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Optional[int] ,**a__ : Any) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = 
['''torch'''] def __init__( self : List[str] ,*a__ : Dict ,**a__ : Optional[int]) -> int: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : int ,**a__ : List[Any]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Optional[int] ,**a__ : str) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : str ,*a__ : Optional[int] ,**a__ : Optional[Any]) -> List[str]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : Optional[Any] ,**a__ : Any) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : Tuple ,**a__ : Optional[Any]) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Optional[int] ,*a__ : Union[str, Any] ,**a__ : Union[str, Any]) -> Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : str ,**a__ : Dict) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Tuple ,**a__ : Union[str, Any]) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : str ,*a__ : Dict ,**a__ : Any) -> List[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : Optional[int] ,**a__ : Any) -> str: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : Tuple ,**a__ : Dict) -> Union[str, Any]: """simple 
docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[Any] ,*a__ : List[Any] ,**a__ : Any) -> Tuple: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : str ,**a__ : Any) -> Any: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : str ,**a__ : List[str]) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[Any] ,*a__ : Dict ,**a__ : Tuple) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : List[str] ,**a__ : str) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : List[str] ,**a__ : Any) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Optional[int] ,*a__ : Optional[Any] ,**a__ : Any) -> List[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : int ,**a__ : List[str]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : str ,**a__ : Optional[int]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Union[str, Any] ,*a__ : Dict ,**a__ : Any) -> int: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Tuple ,*a__ : Union[str, Any] ,**a__ : Union[str, Any]) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : 
Any ,*a__ : int ,**a__ : List[Any]) -> Dict: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : str ,*a__ : Any ,**a__ : List[str]) -> int: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : Optional[int] ,**a__ : Dict) -> Any: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : str ,**a__ : Optional[Any]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Dict ,*a__ : Union[str, Any] ,**a__ : str) -> str: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ,*a__ : Optional[int] ,**a__ : Dict) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : Tuple ,**a__ : Optional[Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : int ,*a__ : List[Any] ,**a__ : Optional[int]) -> List[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Tuple ,*a__ : Tuple ,**a__ : List[Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ,*a__ : Any ,**a__ : Optional[int]) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Optional[int] ,*a__ : Optional[Any] ,**a__ : List[Any]) -> Tuple: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[Any] ,*a__ : Union[str, Any] ,**a__ : 
Any) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : List[Any] ,**a__ : Union[str, Any]) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : str ,*a__ : int ,**a__ : Dict) -> Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : Union[str, Any] ,**a__ : Any) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Tuple ,*a__ : Union[str, Any] ,**a__ : List[Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[Any] ,*a__ : List[str] ,**a__ : Any) -> Any: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : str ,**a__ : int) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : Union[str, Any] ,**a__ : Any) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : int ,*a__ : Any ,**a__ : Tuple) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Optional[Any] ,**a__ : str) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Tuple ,*a__ : List[str] ,**a__ : List[Any]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[str] ,*a__ : Optional[Any] ,**a__ : Optional[Any]) -> List[Any]: """simple docstring""" requires_backends(self 
,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[Any] ,*a__ : str ,**a__ : str) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : str ,**a__ : Any) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Dict ,*a__ : Any ,**a__ : Dict) -> Any: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : Union[str, Any] ,**a__ : Union[str, Any]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : str ,**a__ : List[str]) -> Dict: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Union[str, Any] ,*a__ : str ,**a__ : Union[str, Any]) -> List[str]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : Union[str, Any] ,**a__ : Tuple) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : Tuple ,**a__ : List[Any]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Union[str, Any] ,*a__ : str ,**a__ : Any) -> Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Union[str, Any] ,*a__ : List[Any] ,**a__ : int) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : Union[str, Any] ,**a__ : Any) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : 
# NOTE(review): auto-generated "dummy object" boilerplate. Every class below is a
# placeholder whose __init__/classmethods immediately call
# requires_backends(..., ['torch']) — presumably so that torch-backed APIs raise a
# clear error when torch is not installed (TODO confirm against the generator).
# This excerpt begins mid-definition (the first class header sits above this chunk),
# so the code below is kept byte-for-byte, in its original one-line layout.
Optional[Any] ,*a__ : List[str] ,**a__ : int) -> str: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[Any] ,*a__ : Any ,**a__ : Optional[int]) -> str: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : Optional[int] ,**a__ : List[Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[Any] ,*a__ : Optional[int] ,**a__ : Dict) -> Dict: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : List[Any] ,**a__ : Optional[Any]) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : Optional[int] ,**a__ : Tuple) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Dict ,*a__ : List[str] ,**a__ : int) -> int: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : List[Any] ,**a__ : str) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : str ,**a__ : Union[str, Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : str ,*a__ : int ,**a__ : Optional[Any]) -> Tuple: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : int ,**a__ : List[Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Optional[Any] ,**a__ : int) -> Dict: """simple docstring""" requires_backends(cls 
# (continuation of the requires_backends call split across the original line break)
,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Optional[Any] ,*a__ : List[str] ,**a__ : List[Any]) -> List[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Tuple ,*a__ : Optional[Any] ,**a__ : Optional[int]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : int ,**a__ : Tuple) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Dict ,*a__ : Union[str, Any] ,**a__ : List[str]) -> List[str]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : Any ,**a__ : Optional[int]) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Optional[Any] ,**a__ : int) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Tuple ,*a__ : Dict ,**a__ : Optional[Any]) -> Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Dict ,**a__ : Union[str, Any]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Tuple ,*a__ : Dict ,**a__ : List[Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Tuple ,*a__ : Any ,**a__ : List[str]) -> Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Union[str, Any] ,**a__ : List[Any]) -> Tuple: """simple docstring""" requires_backends(cls ,['''torch''']) 
# More identical placeholder classes follow; only the decorative type annotations differ.
@classmethod def __UpperCamelCase ( cls : int ,*a__ : Optional[int] ,**a__ : Tuple) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[str] ,*a__ : Any ,**a__ : List[Any]) -> Union[str, Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : Tuple ,**a__ : List[Any]) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : List[str] ,**a__ : Any) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Tuple ,*a__ : int ,**a__ : str) -> str: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[Any] ,*a__ : List[str] ,**a__ : Tuple) -> Any: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[Any] ,*a__ : str ,**a__ : int) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : str ,*a__ : Union[str, Any] ,**a__ : Dict) -> Optional[int]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : Optional[Any] ,**a__ : Union[str, Any]) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : List[str] ,**a__ : Any) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : int ,*a__ : Optional[int] ,**a__ : Union[str, Any]) -> List[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int 
# (parameter list continues on the next original line)
,*a__ : str ,**a__ : Union[str, Any]) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : List[str] ,**a__ : Tuple) -> Optional[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : List[Any] ,*a__ : List[str] ,**a__ : str) -> Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : List[Any] ,**a__ : List[str]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Any ,*a__ : Tuple ,**a__ : int) -> List[Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : str ,*a__ : Tuple ,**a__ : Dict) -> str: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : Dict ,**a__ : int) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : List[str] ,**a__ : List[str]) -> int: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Tuple ,*a__ : Optional[Any] ,**a__ : Any) -> List[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[Any] ,*a__ : Optional[int] ,**a__ : Optional[int]) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : List[str] ,*a__ : Dict ,**a__ : Union[str, Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Tuple ,*a__ : List[str] ,**a__ : Tuple) -> 
Optional[Any]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : Optional[int] ,**a__ : List[str]) -> Dict: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Optional[int] ,*a__ : Dict ,**a__ : List[str]) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Optional[int] ,*a__ : List[Any] ,**a__ : List[str]) -> Optional[int]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : Dict ,*a__ : Optional[Any] ,**a__ : Optional[int]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : int ,*a__ : Any ,**a__ : List[Any]) -> Union[str, Any]: """simple docstring""" requires_backends(cls ,['''torch''']) class a__ ( metaclass=lowercase_ ): snake_case__ = ['''torch'''] def __init__( self : Any ,*a__ : List[str] ,**a__ : Dict) -> List[str]: """simple docstring""" requires_backends(self ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : List[Any] ,**a__ : Tuple) -> List[str]: """simple docstring""" requires_backends(cls ,['''torch''']) @classmethod def __UpperCamelCase ( cls : str ,*a__ : List[str] ,**a__ : Tuple) -> Optional[int]: """simple docstring""" requires_backends(cls ,['''torch'''])
227
"""Tests for the LayoutLMv3 image processor (resize + optional Tesseract OCR).

Reconstructed from obfuscated source: the original had duplicate parameter names
(a SyntaxError), lost `self.` attribute targets, a `setUp` that referenced the
undefined name `LayoutLMvaImageProcessingTester`, and test methods not named
`test_*` (so unittest would never run them). Behavior-relevant data (sizes,
expected words/boxes) is preserved verbatim.
"""
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMvaImageProcessor


class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the configuration used to build image-processor kwargs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """Return the kwargs accepted by LayoutLMvaImageProcessor.__init__."""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    # None when pytesseract is missing so the mixin's save/load tests are skipped.
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        # An int `size` kwarg must be expanded to a square height/width dict.
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        # Intentionally a no-op override of the common test (kept from the original).
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # apply_ocr=True (the default here) also returns OCR words and boxes.
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # Requires network access (datasets hub fixture) and a Tesseract install.
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]]  # noqa: E231
        expected_boxes = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
654
0
"""simple docstring""" import warnings from diffusers import StableDiffusionImgaImgPipeline # noqa F401 warnings.warn( """The `image_to_image.py` script is outdated. Please use directly `from diffusers import""" """ StableDiffusionImg2ImgPipeline` instead.""" )
359
"""Dataset of pre-tokenized sequences for language-model distillation.

Reconstructed from obfuscated source: the original had duplicate parameter
names (a SyntaxError), lost `self.` attribute targets, an undefined base class
`lowercase_` (while `Dataset` was imported unused), and methods all named
`__magic_name__` even though `__init__` calls them by real names.
"""
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class _lowerCAmelCase(Dataset):
    """Holds token-id sequences and their lengths; cleans them on construction.

    `params` is expected to expose `max_model_input_size`, `mlm`,
    `special_tok_ids` (dict) and `is_master` — TODO confirm against the caller.
    """

    def __init__(self, params, data):
        self.params = params

        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Sanity check: one length per sequence and each length matches its sequence."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than `max_model_input_size` into chunks, re-adding the special tokens."""
        max_len = self.params.max_model_input_size
        idx = self.lengths > max_len
        logger.info(f"Splitting {sum(idx)} too long sequences.")

        def divide_chunks(l, n):
            # Chop l into pieces of at most n items.
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # max_len - 2 leaves room for the cls/sep tokens added back below.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences in which at least 50% of the tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
        init_size = len(self)
        unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
        indices = (unk_occs / self.lengths) < 0.5
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log dataset size (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate function: pad all sequences in `batch` to the batch max length.

        Returns (token_ids LongTensor of shape (bs, max_seq_len_), lengths tensor of shape (bs,)).
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(lengths)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
654
0
"""Image processor: optional resize -> center-crop -> rescale -> normalize.

Reconstructed from obfuscated source: the original had duplicate parameter
names (a SyntaxError), lost `self.` attribute targets, an undefined base class
`lowercase_`, and an obfuscated `model_input_names` attribute.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


A_ = logging.get_logger(__name__)


class UpperCAmelCase(BaseImageProcessor):
    """Preprocesses images with optional resize, center-crop, rescale and normalize steps."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PIL.Image.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        rescale_factor=1 / 255,
        do_rescale=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs):
        """Resize `image` to the (height, width) given in the `size` dict."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        # The bare `resize` here resolves to the module-level image_transforms function.
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image, size, data_format=None, **kwargs):
        """Center-crop `image` to the (height, width) given in the `size` dict."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        """Multiply pixel values by `scale` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        """Normalize `image` with the given per-channel `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured pipeline to one image or a list of images.

        Any argument left as None falls back to the value stored on the processor.
        Returns a BatchFeature with key "pixel_values".
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # Parenthesized to fix the original `A and B or C` precedence bug, which
        # raised even when do_resize was False but resample was None.
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
42
import argparse

import torch


def rename_key(orig_key: str) -> str:
    """Translate a key from the original YOSO checkpoint into its HF Transformers name.

    The replacements are order-sensitive: the specific patterns (``norm1``,
    ``mha.attn``, ``ff1`` …) must run before their more generic prefixes
    (``norm``, ``mha``, ``ff``).
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        # everything that is not part of the MLM head lives under the `yoso.` prefix
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings: int, orig_state_dict: dict) -> dict:
    """Rename all state-dict keys in place, drop unused heads, and add derived tensors.

    Args:
        max_position_embeddings: taken from the model config; sizes the position-id buffer.
        orig_state_dict: the raw ``model_state_dict`` loaded from the original checkpoint.

    Returns:
        The same dict object, mutated to match ``YosoForMaskedLM``'s expected keys.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            # pooler / sentence-classification weights are not part of YosoForMaskedLM
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    # position ids carry a +2 offset — presumably a padding-idx convention from the
    # original codebase; TODO confirm against the original YOSO repo
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path: str, yoso_config_file: str, pytorch_dump_path: str) -> None:
    """Load an original YOSO checkpoint, convert its state dict, and save an HF model."""
    # imported lazily so the pure key-renaming helpers above can be used
    # without the (heavy) transformers dependency installed
    from transformers import YosoConfig, YosoForMaskedLM

    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    # load_state_dict's return value reports missing/unexpected keys
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
654
0
from argparse import ArgumentParser

from .env import EnvironmentCommand


def main():
    """Entry point for the ``diffusers-cli`` console script.

    Builds the argument parser, registers every subcommand, dispatches to the
    command factory stored in ``args.func``, and runs the resulting service.
    Exits with status 1 when no subcommand is given.
    """
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        # no subcommand supplied: the subparser did not attach a command factory
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem containing one file."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        Args:
            fo: URL of the compressed file, possibly chained (``gzip://file.txt::http://...``).
            target_protocol: protocol of the underlying filesystem holding the archive.
            target_options: options forwarded to the underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        # uncompressed name = compressed name with the trailing extension stripped
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # paths inside the archive are always relative to its root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # the archive exposes exactly one entry: the uncompressed file
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            # proxies every attribute/iteration to the wrapped reader but owns
            # a writable __enter__/__exit__ pair
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
654
0
def hamming_distance(string1: str, string2: str) -> int:
    """Return the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3

    Raises:
        ValueError: If the two strings do not have the same length.
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    # sum of booleans counts the mismatching positions
    return sum(char1 != char2 for char1, char2 in zip(string1, string2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
92
def is_power_of_two(number: int) -> bool:
    """Return True if ``number`` is 0 or a power of two.

    Uses the classic bit trick: a power of two has exactly one set bit, so
    clearing its lowest set bit (``n & (n - 1)``) yields 0. Note that 0 also
    satisfies the test.

    >>> is_power_of_two(0)
    True
    >>> is_power_of_two(8)
    True
    >>> is_power_of_two(6)
    False

    Raises:
        ValueError: If ``number`` is negative.
    """
    if number < 0:
        raise ValueError("number must not be negative")
    # bitwise & binds tighter than ==, so this is (number & (number - 1)) == 0
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
654
0
import gc
import unittest

from parameterized import parameterized

from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp


@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    """Integration tests comparing Flax UNet outputs against stored reference slices."""

    def get_file_format(self, seed, shape):
        # naming scheme of the reference numpy fixtures hosted on the Hub
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        # fp16 weights live on the "bf16" revision of the repos used below
        revision = "bf16" if fp16 else None

        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
404
class MaxFenwickTree:
    """Fenwick tree (binary indexed tree) supporting point updates and range-max queries.

    Indices are 0-based. ``tree[i]`` stores the maximum of ``arr`` over the range
    ``[get_prev(i) + 1, i]``. Values are assumed non-negative (the query
    accumulator starts at 0).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size   # raw values
        self.tree = [0] * size  # range maxima

    @staticmethod
    def get_next(index: int) -> int:
        # next node whose covered range contains `index` (sets the lowest unset bit)
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        # index just before the left border of the range ending at `index`
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        """Set ``arr[index] = value`` and restore the tree invariant.

        Each tree node covering ``index`` is recomputed, so the structure stays
        correct even when ``value`` is smaller than the previous entry.
        """
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # leaf range [index, index]: the node is just the value itself
                self.tree[index] = value
            else:
                # recompute the max over [current_left_border, index]; nodes read by
                # query() in this range were already refreshed by earlier iterations
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        """Return the maximum of ``arr[left:right]`` (``right`` exclusive) in O(log n)."""
        right -= 1  # Because of right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # the tree node's whole range fits inside the query: take it wholesale
                result = max(result, self.tree[right])
                right = current_left
            else:
                # partial overlap: fall back to the raw value and step left
                result = max(result, self.arr[right])
                right -= 1
        return result


if __name__ == "__main__":
    import doctest

    doctest.testmod()
654
0
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict

import requests
from slack_sdk import WebClient


client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])


def handle_test_results(test_results):
    """Parse a pytest summary line into (failed, success, time_spent)."""
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent


def extract_first_line_failure(failures_short_lines):
    """Map each failing doctest file to the first line of its error message."""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures


class Message:
    """Builds and posts the Slack report for a doc-test CI run."""

    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        """Total runtime formatted as XhYmZs."""
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed.\nThe suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        """One mrkdwn section listing the failed tests, grouped by doc category."""
        line_length = 40
        # NOTE(review): reads the module-level `doc_test_results` built in __main__,
        # not self.doc_test_results — kept as-is to preserve behavior
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        """Post a generic failure message when the run itself broke."""
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        # payload is already a Python list here, so it is dumped directly
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        # keep the API response: post_reply() threads off its "ts" field
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        """Build the per-job reply blocks, truncating very long error messages."""
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        # drop the aggregate entries so only per-job results remain
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]

                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)


def get_job_links():
    """Fetch job name -> html_url for every job of the current GitHub Actions run."""
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    """Read every text file in the artifact directory `name` into a dict keyed by stem."""
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact


def retrieve_available_artifacts():
    """Scan the working directory for downloaded artifact folders."""

    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts


if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
647
# NOTE(review): machine-obfuscated TF Pegasus test module. Parameter lists
# repeat placeholder names (`UpperCamelCase__`, `a`) — a SyntaxError as
# written — class attributes share the name `_lowercase`, and assignment
# targets were rewritten to `snake_case__` so later reads reference names the
# assignments no longer define. Comments describe apparent intent only; code
# tokens are reproduced unchanged.
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class _lowerCAmelCase:
    """Model tester: builds a tiny Pegasus config and matching dummy inputs."""

    # NOTE(review): originally three distinct attributes (config class,
    # config overrides, hidden activation); renaming them all to `_lowercase`
    # means only the last assignment survives.
    _lowercase: List[str] = PegasusConfig
    _lowercase: Union[str, Any] = {}
    _lowercase: Tuple = '''gelu'''

    def __init__(
        self: List[str],
        UpperCamelCase__: Dict,
        UpperCamelCase__: Optional[int] = 1_3,
        UpperCamelCase__: Any = 7,
        UpperCamelCase__: Optional[Any] = True,
        UpperCamelCase__: Optional[int] = False,
        UpperCamelCase__: int = 9_9,
        UpperCamelCase__: Dict = 3_2,
        UpperCamelCase__: str = 2,
        UpperCamelCase__: int = 4,
        UpperCamelCase__: Tuple = 3_7,
        UpperCamelCase__: Union[str, Any] = 0.1,
        UpperCamelCase__: Any = 0.1,
        UpperCamelCase__: str = 4_0,
        UpperCamelCase__: Optional[int] = 2,
        UpperCamelCase__: Optional[Any] = 1,
        UpperCamelCase__: Dict = 0,
    ):
        '''simple docstring'''
        # Store the test hyper-parameters on the tester instance.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id

    def __magic_name__(self: Optional[Any]):
        '''simple docstring'''
        # Build (config, inputs_dict) for a forward pass: random input ids with
        # a guaranteed EOS appended to each sequence.
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        snake_case__ = tf.concat([input_ids, eos_tensor], axis=1)
        snake_case__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)
        return config, inputs_dict

    def __magic_name__(self: Any, UpperCamelCase__: Tuple, UpperCamelCase__: Union[str, Any]):
        '''simple docstring'''
        # Check that cached decoding (past_key_values) matches a full forward pass.
        snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder()
        snake_case__ = inputs_dict["""input_ids"""]
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict["""attention_mask"""][:1, :]
        snake_case__ = inputs_dict["""head_mask"""]
        snake_case__ = 1
        # first forward pass
        snake_case__ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__, use_cache=UpperCamelCase__)
        snake_case__, snake_case__ = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3), config.vocab_size)
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.inta)
        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens], axis=-1)
        snake_case__ = tf.concat([attention_mask, next_attn_mask], axis=-1)
        snake_case__ = model(UpperCamelCase__, attention_mask=UpperCamelCase__)[0]
        snake_case__ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, past_key_values=UpperCamelCase__)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        snake_case__ = int(ids_tensor((1,), output_from_past.shape[-1]))
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCamelCase__, UpperCamelCase__, rtol=1E-3)


def _UpperCAmelCase(
    a: str,
    a: Union[str, Any],
    a: List[str],
    a: str = None,
    a: int = None,
    a: int = None,
    a: int = None,
    a: Optional[int] = None,
):
    # Fill in default attention/head masks for a Pegasus seq2seq forward pass.
    if attention_mask is None:
        # Mask out pad tokens in the encoder input.
        snake_case__ = tf.cast(tf.math.not_equal(a, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        # Always attend to the first decoder token; mask pads elsewhere.
        snake_case__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class _lowerCAmelCase(lowercase_, lowercase_, unittest.TestCase):
    """Common-suite tests for the TF Pegasus model classes."""

    _lowercase: int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    _lowercase: List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    _lowercase: List[Any] = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase: Optional[int] = True
    _lowercase: Dict = False
    _lowercase: Any = False

    def __magic_name__(self: str):
        '''simple docstring'''
        # setUp: build the model tester and the generic config tester.
        snake_case__ = TFPegasusModelTester(self)
        snake_case__ = ConfigTester(self, config_class=UpperCamelCase__)

    def __magic_name__(self: List[Any]):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def __magic_name__(self: Optional[int]):
        '''simple docstring'''
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__)


@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase(unittest.TestCase):
    """Slow integration test: batch-summarize two articles with google/pegasus-xsum."""

    _lowercase: List[str] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
    ]
    _lowercase: str = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    _lowercase: int = '''google/pegasus-xsum'''

    @cached_property
    def __magic_name__(self: Dict):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def __magic_name__(self: int):
        '''simple docstring'''
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def __magic_name__(self: Dict, **UpperCamelCase__: List[Any]):
        '''simple docstring'''
        # Generate and compare against the expected summaries.
        snake_case__ = self.translate_src_text(**UpperCamelCase__)
        assert self.expected_text == generated_words

    def __magic_name__(self: str, **UpperCamelCase__: List[Any]):
        '''simple docstring'''
        # Tokenize, generate with beam search, and decode back to text.
        snake_case__ = self.tokenizer(self.src_text, **UpperCamelCase__, padding=UpperCamelCase__, return_tensors="""tf""")
        snake_case__ = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=UpperCamelCase__,
        )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=UpperCamelCase__)
        return generated_words

    @slow
    def __magic_name__(self: List[str]):
        '''simple docstring'''
        self._assert_generated_batch_equal_expected()
654
0
class Node:
    """A node of a doubly linked list.

    BUG FIX: the obfuscated source named all three classes of this module
    ``a_`` (each shadowing the previous), named every accessor ``A_`` (only the
    last survived), and mangled every ``self.<attr> = ...`` assignment to a
    throwaway local, so no instance state was ever stored. Names are
    reconstructed from the intact internal call sites
    (``Node(...)``, ``LinkedListIterator(self.head)``, ``get_data`` /
    ``get_next`` / ``get_previous``, ``set_head`` / ``set_tail``, ...).
    """

    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f'{self.data}'

    def get_data(self):
        """Return the payload stored in this node."""
        return self.data

    def get_next(self):
        """Return the successor node (or None)."""
        return self.next

    def get_previous(self):
        """Return the predecessor node (or None)."""
        return self.previous


class LinkedListIterator:
    """Forward iterator over the data values of a linked list."""

    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    """Doubly linked list supporting insertion, lookup and deletion."""

    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        """Space-separated rendering of all values, head to tail."""
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        """Return the first value, or None if the list is empty."""
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        """Return the last value, or None if the list is empty."""
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node):
        """Make `node` the new head (first node of an empty list, otherwise
        inserted before the current head)."""
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node):
        """Make `node` the new tail (delegates to set_head on an empty list)."""
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value):
        """Append `value` at the end of the list."""
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert):
        """Splice `node_to_insert` immediately before `node`."""
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            # Inserting before the head: the new node becomes the head.
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert):
        """Splice `node_to_insert` immediately after `node`."""
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            # Inserting after the tail: the new node becomes the tail.
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert

    def insert_at_position(self, position, value):
        """Insert `value` at 1-based `position`; appends if position is past the end."""
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        """Return the first node holding `item`; raise if not present."""
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found')

    def delete_value(self, value):
        """Remove the first node holding `value`, fixing head/tail as needed."""
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node):
        """Unlink `node` from its neighbours and clear its own pointers."""
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def lowercase() -> str:
    '''simple docstring'''
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
205
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import regex

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy

a__ = logging.get_logger(__name__)

# NOTE(review): machine-obfuscated module — all three constants below were
# renamed to `a__` (each rebinding shadows the previous), and assignment
# targets inside methods were rewritten to `snake_case__`, so later reads
# reference names the assignments no longer define. Comments describe
# apparent intent only; code tokens are reproduced unchanged.
a__ = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}

a__ = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}

a__ = {
    """jukebox""": 5_1_2,
}


class _lowerCAmelCase(lowercase_):
    """Jukebox tokenizer: maps (artist, genres, lyrics) triples to id sequences
    using three separate vocabularies loaded from JSON files."""

    _lowercase: str = VOCAB_FILES_NAMES
    _lowercase: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase: str = PRETRAINED_LYRIC_TOKENS_SIZES
    _lowercase: Any = ['''input_ids''', '''attention_mask''']

    def __init__(
        self: int,
        UpperCamelCase__: Optional[Any],
        UpperCamelCase__: int,
        UpperCamelCase__: Optional[int],
        UpperCamelCase__: int = ["v3", "v2", "v2"],
        UpperCamelCase__: List[str] = 5_1_2,
        UpperCamelCase__: Union[str, Any] = 5,
        UpperCamelCase__: List[Any] = "<|endoftext|>",
        **UpperCamelCase__: List[Any],
    ):
        '''simple docstring'''
        # Wrap a plain-string unk token into an AddedToken.
        snake_case__ = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__, UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__,
            n_genres=UpperCamelCase__,
            version=UpperCamelCase__,
            max_n_lyric_tokens=UpperCamelCase__,
            **UpperCamelCase__,
        )
        snake_case__ = version
        snake_case__ = max_n_lyric_tokens
        snake_case__ = n_genres
        # Load the artists / genres / lyrics vocabularies from their JSON files.
        with open(UpperCamelCase__, encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__, encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__, encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            snake_case__ = oov.replace(R"""\-'""", R"""\-+'""")
        snake_case__ = regex.compile(UpperCamelCase__)
        # Build the inverse (id -> token) mappings.
        snake_case__ = {v: k for k, v in self.artists_encoder.items()}
        snake_case__ = {v: k for k, v in self.genres_encoder.items()}
        snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def __magic_name__(self: List[str]):
        '''simple docstring'''
        # Total vocabulary size across the three sub-vocabularies.
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def __magic_name__(self: Union[str, Any]):
        '''simple docstring'''
        return dict(self.artists_encoder, self.genres_encoder, self.lyrics_encoder)

    def __magic_name__(self: Tuple, UpperCamelCase__: int, UpperCamelCase__: Optional[int], UpperCamelCase__: int):
        '''simple docstring'''
        # Convert artist/genre/lyric tokens to ids; unknown tokens map to 0 and
        # genre lists are padded with -1 up to n_genres.
        snake_case__ = [self.artists_encoder.get(UpperCamelCase__, 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            snake_case__ = [self.genres_encoder.get(UpperCamelCase__, 0) for genre in list_genres[genres]]
            snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def __magic_name__(self: Optional[int], UpperCamelCase__: Optional[int]):
        '''simple docstring'''
        # Lyrics are tokenized character by character.
        return list(UpperCamelCase__)

    def __magic_name__(self: int, UpperCamelCase__: Optional[Any], UpperCamelCase__: Union[str, Any], UpperCamelCase__: Any, **UpperCamelCase__: List[str]):
        '''simple docstring'''
        # Normalize then tokenize an (artist, genre, lyrics) triple.
        snake_case__, snake_case__, snake_case__ = self.prepare_for_tokenization(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)
        snake_case__ = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics

    def __magic_name__(self: List[Any], UpperCamelCase__: str, UpperCamelCase__: str, UpperCamelCase__: str, UpperCamelCase__: bool = False):
        '''simple docstring'''
        # Normalize artist/genre spellings per tokenizer version and strip
        # out-of-vocabulary characters from the lyrics.
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                snake_case__ = artists[idx].lower()
                snake_case__ = [genres[idx].lower()]
            else:
                snake_case__ = self._normalize(artists[idx]) + """.v2"""
                snake_case__ = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            snake_case__ = 0
            snake_case__ = len(UpperCamelCase__) + 1
            snake_case__ = self.vocab
            snake_case__ = {v: k for k, v in self.vocab.items()}
            snake_case__ = """"""
        else:
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
        snake_case__ = self._run_strip_accents(UpperCamelCase__)
        snake_case__ = lyrics.replace("""\\""", """\n""")
        snake_case__ = self.out_of_vocab.sub("""""", UpperCamelCase__), [], []
        return artists, genres, lyrics

    def __magic_name__(self: Union[str, Any], UpperCamelCase__: str):
        '''simple docstring'''
        # Remove combining accent marks (Unicode category Mn) after NFD decomposition.
        snake_case__ = unicodedata.normalize("""NFD""", UpperCamelCase__)
        snake_case__ = []
        for char in text:
            snake_case__ = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)

    def __magic_name__(self: List[str], UpperCamelCase__: str):
        '''simple docstring'''
        # Lower-case and replace every character outside [a-zA-Z0-9.] with '_',
        # collapsing runs of '_' and trimming them from both ends.
        snake_case__ = (
            [chr(UpperCamelCase__) for i in range(ord("""a"""), ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A"""), ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0"""), ord("""9""") + 1)]
            + ["""."""]
        )
        snake_case__ = frozenset(UpperCamelCase__)
        snake_case__ = re.compile(R"""_+""")
        snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
        snake_case__ = pattern.sub("""_""", UpperCamelCase__).strip("""_""")
        return text

    def __magic_name__(self: List[Any], UpperCamelCase__: List[str]):
        '''simple docstring'''
        return " ".join(UpperCamelCase__)

    def __magic_name__(self: Any, UpperCamelCase__: str, UpperCamelCase__: Optional[Union[str, TensorType]] = None, UpperCamelCase__: bool = False):
        '''simple docstring'''
        # Convert `inputs` to a tensor of the requested framework (TF / PyTorch /
        # JAX / NumPy), optionally prepending a batch axis.
        if not isinstance(UpperCamelCase__, UpperCamelCase__):
            snake_case__ = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            snake_case__ = tf.constant
            snake_case__ = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            snake_case__ = torch.tensor
            snake_case__ = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp  # noqa: F811

            snake_case__ = jnp.array
            snake_case__ = _is_jax
        else:
            snake_case__ = np.asarray
            snake_case__ = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                snake_case__ = [inputs]
            if not is_tensor(UpperCamelCase__):
                snake_case__ = as_tensor(UpperCamelCase__)
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__(self: str, UpperCamelCase__: Any, UpperCamelCase__: Dict, UpperCamelCase__: Any = "", UpperCamelCase__: Dict = "pt"):
        '''simple docstring'''
        # Tokenize, convert to ids, then pack ids plus attention masks into a
        # BatchEncoding (one entry per tokenizer version/prior level).
        snake_case__ = [0, 0, 0]
        snake_case__ = [artist] * len(self.version)
        snake_case__ = [genres] * len(self.version)
        snake_case__, snake_case__, snake_case__ = self.tokenize(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)
        snake_case__, snake_case__, snake_case__ = self._convert_token_to_id(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)
        snake_case__ = [-INFINITY] * len(full_tokens[-1])
        snake_case__ = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def __magic_name__(self: Any, UpperCamelCase__: str, UpperCamelCase__: Optional[str] = None):
        '''simple docstring'''
        # Serialize the three vocabularies side by side in `save_directory`.
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)

    def __magic_name__(self: List[Any], UpperCamelCase__: Optional[int], UpperCamelCase__: Tuple, UpperCamelCase__: List[str]):
        '''simple docstring'''
        # Inverse mapping: ids back to artist / genres / lyric characters.
        snake_case__ = self.artists_decoder.get(UpperCamelCase__)
        snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
654
0
'''simple docstring''' from __future__ import annotations def _A ( A__ ): """simple docstring""" for i in range(1 , len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 , len(A__ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 , len(A__ ) ): for j in range(1 , len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
41
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


# NOTE(review): machine-obfuscated chunk — both classes were renamed to
# `_lowerCAmelCase` (the second shadows the first) and assignment targets were
# rewritten to `snake_case__`, so later reads reference names the assignments
# no longer define. Comments describe apparent intent only; code tokens are
# reproduced unchanged.
class _lowerCAmelCase:
    """Resize images so the shorter edge falls in a sampled range, capping the
    longer edge at `max_size` (Detectron2-style ResizeShortestEdge)."""

    def __init__(self: List[Any], UpperCamelCase__: Any, UpperCamelCase__: List[str] = sys.maxsize):
        '''simple docstring'''
        snake_case__ = """bilinear"""
        snake_case__ = max_size
        snake_case__ = short_edge_length

    def __call__(self: List[str], UpperCamelCase__: Tuple):
        '''simple docstring'''
        snake_case__ = []
        for img in imgs:
            snake_case__, snake_case__ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            snake_case__ = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            snake_case__ = size * 1.0 / min(UpperCamelCase__, UpperCamelCase__)
            if h < w:
                snake_case__, snake_case__ = size, scale * w
            else:
                snake_case__, snake_case__ = scale * h, size
            # Shrink further if the long edge would exceed max_size.
            if max(UpperCamelCase__, UpperCamelCase__) > self.max_size:
                snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__, UpperCamelCase__)
                snake_case__ = newh * scale
                snake_case__ = neww * scale
            snake_case__ = int(neww + 0.5)
            snake_case__ = int(newh + 0.5)
            if img.dtype == np.uinta:
                # uint8 arrays go through PIL's resampler...
                snake_case__ = Image.fromarray(UpperCamelCase__)
                snake_case__ = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                snake_case__ = np.asarray(UpperCamelCase__)
            else:
                # ...everything else through torch interpolation.
                snake_case__ = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                snake_case__ = nn.functional.interpolate(
                    UpperCamelCase__, (newh, neww), mode=self.interp_method, align_corners=UpperCamelCase__).squeeze(0)
            img_augs.append(UpperCamelCase__)
        return img_augs


class _lowerCAmelCase:
    """Full preprocessing pipeline: load, resize, normalize and pad images to a batch."""

    def __init__(self: Dict, UpperCamelCase__: Optional[int]):
        '''simple docstring'''
        # Pull every relevant knob out of the config object.
        snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        snake_case__ = cfg.INPUT.FORMAT
        snake_case__ = cfg.SIZE_DIVISIBILITY
        snake_case__ = cfg.PAD_VALUE
        snake_case__ = cfg.INPUT.MAX_SIZE_TEST
        snake_case__ = cfg.MODEL.DEVICE
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std

    def __magic_name__(self: Dict, UpperCamelCase__: Dict):
        '''simple docstring'''
        # Pad every image to the max height/width in the batch and stack,
        # also returning the original per-image sizes.
        snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images]))
        snake_case__ = [im.shape[-2:] for im in images]
        snake_case__ = [
            nn.functional.pad(
                UpperCamelCase__,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(UpperCamelCase__, UpperCamelCase__)
        ]
        return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__)

    def __call__(self: List[str], UpperCamelCase__: Dict, UpperCamelCase__: str = False):
        '''simple docstring'''
        with torch.no_grad():
            if not isinstance(UpperCamelCase__, UpperCamelCase__):
                snake_case__ = [images]
            if single_image:
                assert len(UpperCamelCase__) == 1
            for i in range(len(UpperCamelCase__)):
                # Tensors are moved to the device; paths/arrays are loaded and
                # converted first.
                if isinstance(images[i], torch.Tensor):
                    images.insert(UpperCamelCase__, images.pop(UpperCamelCase__).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        UpperCamelCase__,
                        torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            snake_case__ = torch.tensor([im.shape[:2] for im in images])
            snake_case__ = self.aug(UpperCamelCase__)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            snake_case__ = [self.normalizer(UpperCamelCase__) for x in images]
            # now pad them to do the following operations
            snake_case__, snake_case__ = self.pad(UpperCamelCase__)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            snake_case__ = torch.true_divide(UpperCamelCase__, UpperCamelCase__)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _UpperCAmelCase(a: Optional[Any], a: Any):
    # Rescale boxes in (x, y) pairs by the per-image y/x scale factors.
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _UpperCAmelCase(a: Any, a: Tuple[int, int]):
    # Clip box coordinates into the image rectangle (in place).
    assert torch.isfinite(a).all(), "Box tensor contains infinite or NaN!"
    snake_case__, snake_case__ = box_size
    tensor[:, 0].clamp_(min=0, max=a)
    tensor[:, 1].clamp_(min=0, max=a)
    tensor[:, 2].clamp_(min=0, max=a)
    tensor[:, 3].clamp_(min=0, max=a)
654
0
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class a__ : @staticmethod def __magic_name__ ( *_a , **_a ): pass @is_pipeline_test @require_torch @require_vision class a__ ( unittest.TestCase ): __lowerCAmelCase = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def __magic_name__ ( self , _a , _a , _a ): lowercase : List[Any] = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) lowercase : int = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def __magic_name__ ( self , _a , _a ): lowercase : Optional[int] = vqa_pipeline(UpperCamelCase__ , top_k=1 ) self.assertEqual( UpperCamelCase__ , [ [{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ )}], [{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ )}], ] , ) @require_torch def __magic_name__ ( self ): lowercase : Optional[Any] = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" ) lowercase : Tuple = "./tests/fixtures/tests_samples/COCO/000000039769.png" lowercase : str = "How many cats are there?" lowercase : Optional[Any] = vqa_pipeline(image=UpperCamelCase__ , question="How many cats are there?" 
, top_k=2 ) self.assertEqual( UpperCamelCase__ , [{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ )}, {"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ )}] ) lowercase : Dict = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( UpperCamelCase__ , [{"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ )}, {"score": ANY(UpperCamelCase__ ), "answer": ANY(UpperCamelCase__ )}] ) @slow @require_torch def __magic_name__ ( self ): lowercase : List[Any] = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" ) lowercase : List[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png" lowercase : int = "How many cats are there?" lowercase : List[str] = vqa_pipeline(image=UpperCamelCase__ , question=UpperCamelCase__ , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] ) lowercase : Tuple = vqa_pipeline({"image": image, "question": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] ) lowercase : List[Any] = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase__ , decimals=4 ) , [[{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]] * 2 , ) @require_tf @unittest.skip("Visual question answering not implemented in TF" ) def __magic_name__ ( self ): pass
361
"""WavLM model configuration."""

import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    """Configuration class holding all hyper-parameters of a WavLM model.

    Instantiating with the defaults yields a configuration similar to
    ``microsoft/wavlm-base``. See the parameter names below; each one is
    stored verbatim as an attribute of the same name.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Convolutional feature-encoder layout; stored as lists so they are JSON-serializable.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # The three conv_* lists describe the same stack of layers, so they must agree in length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        # Total downsampling factor of the conv feature encoder (product of all strides).
        return functools.reduce(operator.mul, self.conv_stride, 1)
654
0
"""Tests for the Flax Blenderbot model."""

import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers import BlenderbotTokenizer
    from transformers.models.blenderbot.modeling_flax_blenderbot import (
        FlaxBlenderbotForConditionalGeneration,
        FlaxBlenderbotModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Fill in default attention/head masks for a Blenderbot forward call."""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotModelTester:
    """Builds tiny Blenderbot configs/inputs and shared cache-consistency checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        """Random input ids ending in EOS (id 2) plus a matching tiny config."""
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        """Decoding with an init_cache'd past must match full decoding (up to 1e-3)."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        """Same as check_use_cache_forward but with an explicitly padded attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # Extend the real mask with zeros up to the cache length.
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        """Fixed batch of input ids (with one padded row) and a tiny config."""
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        """LM head output has shape (batch, seq_len, vocab_size)."""
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        """Encoder and decoder sequences of different lengths are supported."""
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        """shift_tokens_right replaces one pad (id 1) and puts decoder-start (id 2) first."""
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        """Jitted and non-jitted encode produce identically shaped outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        """Jitted and non-jitted decode produce identically shaped outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        """The 3B checkpoint should reproduce the reference ParlAI utterance."""
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
3
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the score-based SDE-VE sampler.

    Attributes:
        unet: the noise-prediction U-Net.
        scheduler: the ScoreSdeVeScheduler driving the predictor/corrector loop.
    """

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Run the predictor-corrector sampling loop and return generated images.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of diffusion steps.
            generator: optional RNG for deterministic sampling.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: return an ImagePipelineOutput instead of a bare tuple.
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the (denoised) mean of the final step, mapped to [0, 1], as the image.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
654
0
"""simple docstring""" def UpperCAmelCase ( snake_case : int , snake_case : int ): return number | (1 << position) def UpperCAmelCase ( snake_case : int , snake_case : int ): return number & ~(1 << position) def UpperCAmelCase ( snake_case : int , snake_case : int ): return number ^ (1 << position) def UpperCAmelCase ( snake_case : int , snake_case : int ): return ((number >> position) & 1) == 1 def UpperCAmelCase ( snake_case : int , snake_case : int ): return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
227
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast (dummy-component) tests for the IF inpainting super-resolution pipeline."""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    # Super-resolution derives output size from the input image, so width/height are not params.
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic minimal inputs: 16x16 low-res image, 32x32 original and mask."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Requires safety checker to be in float16.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
654
0
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : List[Any] =logging.get_logger(__name__) def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Any=False ) -> Optional[int]: '''simple docstring''' lowercase = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'deit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'deit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'deit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'deit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'deit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'deit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'deit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'deit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'deit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'deit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", 
"""deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" lowercase = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def UpperCAmelCase__ ( lowerCAmelCase__ :int , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Union[str, Any]=False ) -> int: '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowercase = """""" else: lowercase = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowercase = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) lowercase = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict lowercase = in_proj_weight[ : config.hidden_size, : ] lowercase = in_proj_bias[: config.hidden_size] lowercase = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowercase = in_proj_weight[ -config.hidden_size :, : ] lowercase = in_proj_bias[-config.hidden_size :] def UpperCAmelCase__ ( lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :int ) -> Optional[int]: '''simple 
docstring''' lowercase = dct.pop(lowerCAmelCase__ ) lowercase = val def UpperCAmelCase__ ( ) -> int: '''simple docstring''' lowercase = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ) return im @torch.no_grad() def UpperCAmelCase__ ( lowerCAmelCase__ :List[str] , lowerCAmelCase__ :Tuple ) -> Optional[int]: '''simple docstring''' lowercase = DeiTConfig() # all deit models have fine-tuned heads lowercase = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size lowercase = 1_0_0_0 lowercase = """huggingface/label-files""" lowercase = """imagenet-1k-id2label.json""" lowercase = json.load(open(hf_hub_download(lowerCAmelCase__ , lowerCAmelCase__ , repo_type="""dataset""" ) , """r""" ) ) lowercase = {int(lowerCAmelCase__ ): v for k, v in idalabel.items()} lowercase = idalabel lowercase = {v: k for k, v in idalabel.items()} lowercase = int(deit_name[-6:-4] ) lowercase = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): lowercase = 1_9_2 lowercase = 7_6_8 lowercase = 1_2 lowercase = 3 elif deit_name[9:].startswith("""small""" ): lowercase = 3_8_4 lowercase = 1_5_3_6 lowercase = 1_2 lowercase = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): lowercase = 1_0_2_4 lowercase = 4_0_9_6 lowercase = 2_4 lowercase = 1_6 # load original model from timm lowercase = timm.create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowercase = timm_model.state_dict() lowercase = create_rename_keys(lowerCAmelCase__ , lowerCAmelCase__ ) for src, dest in rename_keys: rename_key(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) read_in_q_k_v(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # load HuggingFace model lowercase = DeiTForImageClassificationWithTeacher(lowerCAmelCase__ ).eval() 
model.load_state_dict(lowerCAmelCase__ ) # Check outputs on an image, prepared by DeiTImageProcessor lowercase = int( (2_5_6 / 2_2_4) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 lowercase = DeiTImageProcessor(size=lowerCAmelCase__ , crop_size=config.image_size ) lowercase = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowercase = encoding["""pixel_values"""] lowercase = model(lowerCAmelCase__ ) lowercase = timm_model(lowerCAmelCase__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowerCAmelCase__ , outputs.logits , atol=1e-3 ) Path(lowerCAmelCase__ ).mkdir(exist_ok=lowerCAmelCase__ ) print(f'Saving model {deit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCAmelCase__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(lowerCAmelCase__ ) if __name__ == "__main__": __lowerCAmelCase : List[Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) __lowerCAmelCase : Optional[Any] =parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
359
"""Project Euler problem 145: how many reversible numbers are there below one billion?

A number n is "reversible" when every digit of n + reverse(n) is odd.
Leading zeroes are not allowed in either n or reverse(n).
"""

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the reversible numbers of ``length`` digits that are consistent with
    the outer digit pairs chosen so far.

    The recursion fixes digit pairs from the outside in; ``remaining_length``
    is the number of still-undecided positions and ``remainder`` is the carry
    produced by the pairs already fixed.

    :param remaining_length: digit positions not yet assigned.
    :param remainder: carry accumulated from the already-fixed outer pairs.
    :param digits: scratch buffer holding the chosen digits.
    :param length: total digit count of the candidate numbers.
    :return: number of reversible numbers matching the choices made so far.
    """
    if remaining_length == 0:
        # A leading or trailing zero is not a valid `length`-digit number.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Re-walk the pair sums from the middle outwards: every digit of
        # n + reverse(n) must be odd, carrying between positions.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the centre digit pairs with itself, so its contribution
        # 2*digit is always even — the incoming carry must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    # General case: pick the next outer pair.  The two digits must have
    # opposite parity (accounting for the carry) so their digit sum is odd.
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """
    Count the reversible numbers below 10**max_power.

    >>> solution(3)
    120
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
654
0
"""Simulated annealing: probabilistic local search over a SearchProblem."""

import math
import random
from typing import Any

from .hill_climbing import SearchProblem


def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """
    Optimise ``search_prob`` by simulated annealing and return the best state seen.

    Starting from ``search_prob``, repeatedly pick a random neighbor.  A better
    neighbor is always accepted; a worse one is accepted with probability
    e**(change / temperature), so hot (early) iterations can escape local
    optima while cold (late) iterations behave greedily.

    :param search_prob: starting state; must expose ``score()``,
        ``get_neighbors()`` and ``x``/``y`` coordinates.
    :param find_max: maximise the score when True, minimise when False.
    :param max_x: upper bound on a neighbor's x coordinate.
    :param min_x: lower bound on a neighbor's x coordinate.
    :param max_y: upper bound on a neighbor's y coordinate.
    :param min_y: lower bound on a neighbor's y coordinate.
    :param visualization: plot score vs. iteration with matplotlib when True.
    :param start_temperate: initial temperature.
    :param rate_of_decrease: fraction by which the temperature drops per step.
    :param threshold_temp: stop once the temperature falls below this value.
    :return: the best-scoring state encountered.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        # Keep sampling until we find a neighbor we can move to (or run out).
        while next_state is None and neighbors:
            index = random.randint(0, len(neighbors) - 1)  # pick a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding the minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # Acceptance probability for a worse neighbor.
                probability = (math.e) ** (change / current_temp)
                if random.random() < probability:
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # Temperature below threshold, or no suitable neighbor was found.
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state


if __name__ == "__main__":

    def test_f1(x, y):
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
42
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool a__ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", """Catalan""": """cat_Latn""", 
"""Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""", """Northern 
Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""", """Samoan""": 
"""smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : List[str] = '''facebook/nllb-200-distilled-600M''' _lowercase : List[Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. 
Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.''' ) _lowercase : Optional[int] = '''translator''' _lowercase : Optional[Any] = AutoTokenizer _lowercase : Dict = AutoModelForSeqaSeqLM _lowercase : List[str] = LANGUAGE_CODES _lowercase : Optional[Any] = ['''text''', '''text''', '''text'''] _lowercase : Tuple = ['''text'''] def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''') if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''') snake_case__ = self.lang_to_code[src_lang] snake_case__ = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( UpperCamelCase__ , return_tensors="""pt""" , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__) def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict): '''simple docstring''' return self.model.generate(**UpperCamelCase__) def __magic_name__ ( self : List[str] , UpperCamelCase__ : Dict): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase__)
654
0
from __future__ import annotations from collections.abc import Sequence from typing import Literal def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" lowerCamelCase = list(UpperCAmelCase__ ) lowerCamelCase = list(UpperCAmelCase__ ) lowerCamelCase = 0 for i in range(len(UpperCAmelCase__ ) ): if lista[i] != lista[i]: count += 1 lowerCamelCase = "_" if count > 1: return False else: return "".join(UpperCAmelCase__ ) def __lowercase( UpperCAmelCase__ ): """simple docstring""" lowerCamelCase = [] while True: lowerCamelCase = ["$"] * len(UpperCAmelCase__ ) lowerCamelCase = [] for i in range(len(UpperCAmelCase__ ) ): for j in range(i + 1 , len(UpperCAmelCase__ ) ): lowerCamelCase = compare_string(binary[i] , binary[j] ) if k is False: lowerCamelCase = "*" lowerCamelCase = "*" temp.append("X" ) for i in range(len(UpperCAmelCase__ ) ): if checka[i] == "$": pi.append(binary[i] ) if len(UpperCAmelCase__ ) == 0: return pi lowerCamelCase = list(set(UpperCAmelCase__ ) ) def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" lowerCamelCase = [] for minterm in minterms: lowerCamelCase = "" for _ in range(UpperCAmelCase__ ): lowerCamelCase = str(minterm % 2 ) + string minterm //= 2 temp.append(UpperCAmelCase__ ) return temp def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" lowerCamelCase = list(UpperCAmelCase__ ) lowerCamelCase = list(UpperCAmelCase__ ) lowerCamelCase = 0 for i in range(len(UpperCAmelCase__ ) ): if lista[i] != lista[i]: count_n += 1 return count_n == count def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" lowerCamelCase = [] lowerCamelCase = [0] * len(UpperCAmelCase__ ) for i in range(len(chart[0] ) ): lowerCamelCase = 0 lowerCamelCase = -1 for j in range(len(UpperCAmelCase__ ) ): if chart[j][i] == 1: count += 1 lowerCamelCase = j if count == 1: lowerCamelCase = 1 for i in range(len(UpperCAmelCase__ ) ): if select[i] == 1: for j in 
range(len(chart[0] ) ): if chart[i][j] == 1: for k in range(len(UpperCAmelCase__ ) ): lowerCamelCase = 0 temp.append(prime_implicants[i] ) while True: lowerCamelCase = 0 lowerCamelCase = -1 lowerCamelCase = 0 for i in range(len(UpperCAmelCase__ ) ): lowerCamelCase = chart[i].count(1 ) if count_n > max_n: lowerCamelCase = count_n lowerCamelCase = i if max_n == 0: return temp temp.append(prime_implicants[rem] ) for i in range(len(chart[0] ) ): if chart[rem][i] == 1: for j in range(len(UpperCAmelCase__ ) ): lowerCamelCase = 0 def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ): """simple docstring""" lowerCamelCase = [[0 for x in range(len(UpperCAmelCase__ ) )] for x in range(len(UpperCAmelCase__ ) )] for i in range(len(UpperCAmelCase__ ) ): lowerCamelCase = prime_implicants[i].count("_" ) for j in range(len(UpperCAmelCase__ ) ): if is_for_table(prime_implicants[i] , binary[j] , UpperCAmelCase__ ): lowerCamelCase = 1 return chart def __lowercase( ): """simple docstring""" lowerCamelCase = int(input("Enter the no. of variables\n" ) ) lowerCamelCase = [ float(UpperCAmelCase__ ) for x in input( "Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split() ] lowerCamelCase = decimal_to_binary(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCamelCase = check(UpperCAmelCase__ ) print("Prime Implicants are:" ) print(UpperCAmelCase__ ) lowerCamelCase = prime_implicant_chart(UpperCAmelCase__ , UpperCAmelCase__ ) lowerCamelCase = selection(UpperCAmelCase__ , UpperCAmelCase__ ) print("Essential Prime Implicants are:" ) print(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod() main()
623
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _UpperCAmelCase ( a : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int): '''simple docstring''' super().__init__() snake_case__ = module snake_case__ = nn.Sequential( nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__) , ) snake_case__ = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str): '''simple docstring''' return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__) + self.adapter(UpperCamelCase__) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _lowercase : Dict = '''bigscience/bloom-1b7''' # Constant values _lowercase : Any = 2.109_6595_5269_2574 _lowercase : Tuple = '''Hello my name is''' _lowercase : List[Any] = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. 
I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) _lowercase : List[str] = 10 def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = AutoTokenizer.from_pretrained(self.model_name) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : str): '''simple docstring''' super().setUp() # Models and tokenizer snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""") snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") def __magic_name__ ( self : Tuple): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = self.model_abit.config self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config""")) snake_case__ = config.to_dict() snake_case__ = config.to_diff_dict() snake_case__ = config.to_json_string() def __magic_name__ ( self : Dict): '''simple docstring''' from bitsandbytes.nn import Paramsabit snake_case__ = self.model_fpaa.get_memory_footprint() snake_case__ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE) snake_case__ = get_some_linear_layer(self.model_abit) self.assertTrue(linear.weight.__class__ == Paramsabit) def __magic_name__ ( self : Optional[int]): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(UpperCamelCase__ , torch.nn.Linear): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == 
torch.uinta) def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = BitsAndBytesConfig() snake_case__ = True snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) def __magic_name__ ( self : Optional[int]): '''simple docstring''' with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(UpperCamelCase__) def __magic_name__ ( self : List[str]): '''simple docstring''' snake_case__ = BitsAndBytesConfig() with self.assertRaises(UpperCamelCase__): snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def __magic_name__ ( self : List[Any]): '''simple docstring''' with self.assertRaises(UpperCamelCase__): # Tries with `str` self.model_abit.to("""cpu""") with self.assertRaises(UpperCamelCase__): # Tries with a `dtype`` self.model_abit.to(torch.floataa) with self.assertRaises(UpperCamelCase__): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""")) with self.assertRaises(UpperCamelCase__): # Tries with a `device` self.model_abit.float() with self.assertRaises(UpperCamelCase__): # Tries with a `device` 
self.model_abit.half() # Test if we did not break anything snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = self.model_fpaa.to(torch.floataa) snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) # Check this does not throw an error snake_case__ = self.model_fpaa.to("""cpu""") # Check this does not throw an error snake_case__ = self.model_fpaa.half() # Check this does not throw an error snake_case__ = self.model_fpaa.float() def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @classmethod def __magic_name__ ( cls : Optional[Any]): '''simple docstring''' snake_case__ = """t5-small""" snake_case__ = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense snake_case__ = AutoTokenizer.from_pretrained(cls.model_name) snake_case__ = """Translate in German: Hello, my dog is cute""" def __magic_name__ ( self : Optional[int]): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Any): '''simple docstring''' from transformers import TaForConditionalGeneration snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules snake_case__ = None # test with `t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) # test with `flan-t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , 
load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) snake_case__ = modules def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit)) snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) # test with `flan-t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : int): '''simple docstring''' super().setUp() # model_name snake_case__ = """bigscience/bloom-560m""" snake_case__ = """t5-small""" # Different types of model snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # Sequence classification model snake_case__ = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # CausalLM model snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # Seq2seq model snake_case__ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") def __magic_name__ ( self : List[str]): '''simple docstring''' del 
self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Tuple): '''simple docstring''' super().setUp() def __magic_name__ ( self : int): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Tuple): '''simple docstring''' snake_case__ = pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass snake_case__ = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' super().setUp() def __magic_name__ ( self : int): '''simple docstring''' snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""") # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1}) # Check that inference pass works on the model snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") # Second real batch snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) 
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = """facebook/opt-350m""" super().setUp() def __magic_name__ ( self : Any): '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""): return # Step 1: freeze all parameters snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__) self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()}) for param in model.parameters(): snake_case__ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability snake_case__ = param.data.to(torch.floataa) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(UpperCamelCase__)): snake_case__ = LoRALayer(module.q_proj , rank=1_6) snake_case__ = LoRALayer(module.k_proj , rank=1_6) snake_case__ = LoRALayer(module.v_proj , rank=1_6) # Step 3: dummy batch snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): snake_case__ = model.forward(**UpperCamelCase__) out.logits.norm().backward() for module in model.modules(): if isinstance(UpperCamelCase__ , UpperCamelCase__): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(UpperCamelCase__ , nn.Embedding): self.assertTrue(module.weight.grad is None) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : List[Any] = '''gpt2-xl''' _lowercase : Any = 3.3191_8548_5415_2187
654
0
"""Runs the doctests embedded in the transformers source files and docs."""

import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow

logger = logging.getLogger()


@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """
        Run the doctests of every file in ``directory`` that passes the filters.

        Args:
            directory: directory whose files are scanned.
            identifier: keep only files whose name contains this substring.
            ignore_files: file names to skip ("__init__.py" is always skipped).
            n_identifier: drop files containing this substring (or any of these
                substrings when a list is given).
            only_modules: when True, resolve each file as a ``transformers``
                attribute and run its doctest suite; when False, run
                ``doctest.testfile`` on the file itself.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    # Not every source file is re-exported as a module attribute.
                    logger.info(f"{module_identifier} is not a module.")
            else:
                # ".." / Path(...) resolves via Path.__rtruediv__; the suite is
                # expected to run from the tests directory.
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path("src/transformers")
        files = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path("src/transformers")
        files = "tokenization"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path("src/transformers")
        files = "configuration"
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path("src/transformers")
        n_identifiers = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_examples(self):
        doc_source_directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
92
import glob import os import random from string import ascii_lowercase, digits import cva a__ = """""" a__ = """""" a__ = """""" a__ = 1 # (0 is vertical, 1 is horizontal) def _UpperCAmelCase ( ): snake_case__ , snake_case__ = get_dataset(a , a ) print("""Processing...""" ) snake_case__ , snake_case__ , snake_case__ = update_image_and_anno(a , a , a ) for index, image in enumerate(a ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' snake_case__ = random_chars(32 ) snake_case__ = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0] snake_case__ = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(F'''/{file_root}.jpg''' , a , [cva.IMWRITE_JPEG_QUALITY, 85] ) print(F'''Success {index+1}/{len(a )} with {file_name}''' ) snake_case__ = [] for anno in new_annos[index]: snake_case__ = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(a ) with open(F'''/{file_root}.txt''' , """w""" ) as outfile: outfile.write("""\n""".join(line for line in annos_list ) ) def _UpperCAmelCase ( a : str , a : str ): snake_case__ = [] snake_case__ = [] for label_file in glob.glob(os.path.join(a , """*.txt""" ) ): snake_case__ = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0] with open(a ) as in_file: snake_case__ = in_file.readlines() snake_case__ = os.path.join(a , F'''{label_name}.jpg''' ) snake_case__ = [] for obj_list in obj_lists: snake_case__ = obj_list.rstrip("""\n""" ).split(""" """ ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(a ) labels.append(a ) return img_paths, labels def _UpperCAmelCase ( a : list , a : list , a : int = 1 ): snake_case__ = [] snake_case__ = [] snake_case__ = [] for idx in range(len(a ) ): snake_case__ = [] snake_case__ = img_list[idx] path_list.append(a ) snake_case__ = anno_list[idx] snake_case__ = cva.imread(a ) if flip_type == 1: snake_case__ = cva.flip(a , a ) for bbox in img_annos: snake_case__ = 1 - 
bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: snake_case__ = cva.flip(a , a ) for bbox in img_annos: snake_case__ = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(a ) new_imgs_list.append(a ) return new_imgs_list, new_annos_lists, path_list def _UpperCAmelCase ( a : int = 32 ): assert number_char > 1, "The number of character should greater than 1" snake_case__ = ascii_lowercase + digits return "".join(random.choice(a ) for _ in range(a ) ) if __name__ == "__main__": main() print("""DONE ✅""")
654
0
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __lowerCamelCase : Tuple = random.Random() def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ): """simple docstring""" if rng is None: _UpperCamelCase =global_rng _UpperCamelCase =[] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class UpperCAmelCase ( unittest.TestCase): """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=7 , UpperCamelCase__ : Optional[Any]=400 , UpperCamelCase__ : Dict=2000 , UpperCamelCase__ : str=2048 , UpperCamelCase__ : Optional[Any]=128 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : str=512 , UpperCamelCase__ : Any=30 , UpperCamelCase__ : Tuple=4_4100 , ) -> Any: _UpperCamelCase =parent _UpperCamelCase =batch_size _UpperCamelCase =min_seq_length _UpperCamelCase =max_seq_length _UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _UpperCamelCase =spectrogram_length _UpperCamelCase =feature_size _UpperCamelCase =num_audio_channels _UpperCamelCase =hop_length _UpperCamelCase =chunk_length _UpperCamelCase =sampling_rate def UpperCamelCase__ ( self : Tuple ) -> Dict: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": 
self.chunk_length, "sampling_rate": self.sampling_rate, } def UpperCamelCase__ ( self : Optional[Any] , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Dict=False ) -> Any: def _flatten(UpperCamelCase__ : Dict ): return list(itertools.chain(*UpperCamelCase__ ) ) if equal_length: _UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size _UpperCamelCase =[ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: _UpperCamelCase =[np.asarray(UpperCamelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class UpperCAmelCase ( lowercase_ , unittest.TestCase): """simple docstring""" lowerCAmelCase_ = TvltFeatureExtractor def UpperCamelCase__ ( self : List[str] ) -> Dict: _UpperCamelCase =TvltFeatureExtractionTester(self ) def UpperCamelCase__ ( self : Optional[int] ) -> str: _UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(UpperCamelCase__ , '''spectrogram_length''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''feature_size''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''num_audio_channels''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''hop_length''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''chunk_length''' ) ) self.assertTrue(hasattr(UpperCamelCase__ , '''sampling_rate''' ) ) def UpperCamelCase__ ( self : int ) -> int: _UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase =feat_extract_first.save_pretrained(UpperCamelCase__ )[0] check_json_file_has_correct_format(UpperCamelCase__ ) _UpperCamelCase =self.feature_extraction_class.from_pretrained(UpperCamelCase__ ) _UpperCamelCase =feat_extract_first.to_dict() _UpperCamelCase =feat_extract_second.to_dict() _UpperCamelCase 
=dict_first.pop('''mel_filters''' ) _UpperCamelCase =dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def UpperCamelCase__ ( self : int ) -> Dict: _UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: _UpperCamelCase =os.path.join(UpperCamelCase__ , '''feat_extract.json''' ) feat_extract_first.to_json_file(UpperCamelCase__ ) _UpperCamelCase =self.feature_extraction_class.from_json_file(UpperCamelCase__ ) _UpperCamelCase =feat_extract_first.to_dict() _UpperCamelCase =feat_extract_second.to_dict() _UpperCamelCase =dict_first.pop('''mel_filters''' ) _UpperCamelCase =dict_second.pop('''mel_filters''' ) self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ ) ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) def UpperCamelCase__ ( self : int ) -> List[str]: _UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 _UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] _UpperCamelCase =[np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs] # Test not batched input _UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched _UpperCamelCase =feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= 
feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking _UpperCamelCase =feature_extractor( UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 , mask_audio=UpperCamelCase__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. _UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)] _UpperCamelCase =np.asarray(UpperCamelCase__ ) _UpperCamelCase =feature_extractor(UpperCamelCase__ , return_tensors='''np''' , sampling_rate=4_4100 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def UpperCamelCase__ ( self : Optional[Any] , UpperCamelCase__ : Dict ) -> Dict: _UpperCamelCase =load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) # automatic decoding with librispeech _UpperCamelCase =ds.sort('''id''' ).select(range(UpperCamelCase__ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def UpperCamelCase__ ( self : Optional[Any] ) -> str: _UpperCamelCase =self._load_datasamples(1 ) _UpperCamelCase =TvltFeatureExtractor() _UpperCamelCase =feature_extractor(UpperCamelCase__ , return_tensors='''pt''' ).audio_values self.assertEquals(audio_values.shape , (1, 1, 192, 128) ) _UpperCamelCase =torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , UpperCamelCase__ , atol=1E-4 ) )
404
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration a__ = 5_0_0_0_0_0 a__ , a__ = os.path.split(__file__) a__ = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json""")) @get_duration def _UpperCAmelCase ( a : datasets.Dataset , **a : Tuple ): snake_case__ = dataset.map(**a ) @get_duration def _UpperCAmelCase ( a : datasets.Dataset , **a : Optional[Any] ): snake_case__ = dataset.filter(**a ) def _UpperCAmelCase ( ): snake_case__ = {"""num examples""": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: snake_case__ = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} ) snake_case__ = generate_example_dataset( os.path.join(a , """dataset.arrow""" ) , a , num_examples=a ) snake_case__ = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=a ) def tokenize(a : Union[str, Any] ): return tokenizer(examples["""text"""] ) snake_case__ = map(a ) snake_case__ = map(a , batched=a ) snake_case__ = map(a , function=lambda a : None , batched=a ) with dataset.formatted_as(type="""numpy""" ): snake_case__ = map(a , function=lambda a : None , batched=a ) with dataset.formatted_as(type="""pandas""" ): snake_case__ = map(a , function=lambda a : None , batched=a ) with dataset.formatted_as(type="""torch""" , columns="""numbers""" ): snake_case__ = map(a , function=lambda a : None , batched=a ) with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ): snake_case__ = map(a , function=lambda a : None , batched=a ) snake_case__ = map(a , function=a , batched=a ) snake_case__ = filter(a ) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(a , """wb""" ) as f: f.write(json.dumps(a ).encode("""utf-8""" 
) ) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
654
0
import glob import os import random from string import ascii_lowercase, digits import cva __snake_case : List[Any] ='' __snake_case : Tuple ='' __snake_case : List[Any] ='' __snake_case : str =1 # (0 is vertical, 1 is horizontal) def lowerCAmelCase__ ( ): '''simple docstring''' lowerCAmelCase__ , lowerCAmelCase__ : Tuple = get_dataset(lowerCamelCase_ ,lowerCamelCase_) print('''Processing...''') lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = update_image_and_anno(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_) for index, image in enumerate(lowerCamelCase_): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' lowerCAmelCase__ : Optional[int] = random_chars(32) lowerCAmelCase__ : Tuple = paths[index].split(os.sep)[-1].rsplit('''.''' ,1)[0] lowerCAmelCase__ : List[str] = f"""{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}""" cva.imwrite(f"""/{file_root}.jpg""" ,lowerCamelCase_ ,[cva.IMWRITE_JPEG_QUALITY, 85]) print(f"""Success {index+1}/{len(lowerCamelCase_)} with {file_name}""") lowerCAmelCase__ : int = [] for anno in new_annos[index]: lowerCAmelCase__ : Tuple = f"""{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}""" annos_list.append(lowerCamelCase_) with open(f"""/{file_root}.txt""" ,'''w''') as outfile: outfile.write('''\n'''.join(line for line in annos_list)) def lowerCAmelCase__ ( lowerCamelCase_ : str ,lowerCamelCase_ : str): '''simple docstring''' lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Tuple = [] for label_file in glob.glob(os.path.join(lowerCamelCase_ ,'''*.txt''')): lowerCAmelCase__ : Union[str, Any] = label_file.split(os.sep)[-1].rsplit('''.''' ,1)[0] with open(lowerCamelCase_) as in_file: lowerCAmelCase__ : str = in_file.readlines() lowerCAmelCase__ : List[str] = os.path.join(lowerCamelCase_ ,f"""{label_name}.jpg""") lowerCAmelCase__ : List[str] = [] for obj_list in obj_lists: lowerCAmelCase__ : Any = obj_list.rstrip('''\n''').split(''' ''') boxes.append( [ int(obj[0]), float(obj[1]), float(obj[2]), float(obj[3]), 
float(obj[4]), ]) if not boxes: continue img_paths.append(lowerCamelCase_) labels.append(lowerCamelCase_) return img_paths, labels def lowerCAmelCase__ ( lowerCamelCase_ : list ,lowerCamelCase_ : list ,lowerCamelCase_ : int = 1): '''simple docstring''' lowerCAmelCase__ : str = [] lowerCAmelCase__ : List[Any] = [] lowerCAmelCase__ : Optional[int] = [] for idx in range(len(lowerCamelCase_)): lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : str = img_list[idx] path_list.append(lowerCamelCase_) lowerCAmelCase__ : Union[str, Any] = anno_list[idx] lowerCAmelCase__ : List[str] = cva.imread(lowerCamelCase_) if flip_type == 1: lowerCAmelCase__ : Union[str, Any] = cva.flip(lowerCamelCase_ ,lowerCamelCase_) for bbox in img_annos: lowerCAmelCase__ : Dict = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]]) elif flip_type == 0: lowerCAmelCase__ : int = cva.flip(lowerCamelCase_ ,lowerCamelCase_) for bbox in img_annos: lowerCAmelCase__ : Optional[Any] = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]]) new_annos_lists.append(lowerCamelCase_) new_imgs_list.append(lowerCamelCase_) return new_imgs_list, new_annos_lists, path_list def lowerCAmelCase__ ( lowerCamelCase_ : int = 32): '''simple docstring''' assert number_char > 1, "The number of character should greater than 1" lowerCAmelCase__ : Optional[int] = ascii_lowercase + digits return "".join(random.choice(lowerCamelCase_) for _ in range(lowerCamelCase_)) if __name__ == "__main__": main() print('DONE ✅')
647
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() a__ = logging.get_logger(__name__) def _UpperCAmelCase ( a : List[str] , a : Any=False ): snake_case__ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", 
"""deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" snake_case__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def _UpperCAmelCase ( a : int , a : List[Any] , a : Union[str, Any]=False ): for i in range(config.num_hidden_layers ): if base_model: snake_case__ = """""" else: snake_case__ = """deit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) snake_case__ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case__ = in_proj_weight[ : config.hidden_size, : ] snake_case__ = in_proj_bias[: config.hidden_size] snake_case__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ = in_proj_weight[ -config.hidden_size :, : ] snake_case__ = in_proj_bias[-config.hidden_size :] def _UpperCAmelCase ( a : Dict , a : Union[str, Any] , a : int ): snake_case__ = dct.pop(a ) snake_case__ = val def _UpperCAmelCase ( ): snake_case__ = 
"""http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case__ = Image.open(requests.get(a , stream=a ).raw ) return im @torch.no_grad() def _UpperCAmelCase ( a : List[str] , a : Tuple ): snake_case__ = DeiTConfig() # all deit models have fine-tuned heads snake_case__ = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size snake_case__ = 1000 snake_case__ = """huggingface/label-files""" snake_case__ = """imagenet-1k-id2label.json""" snake_case__ = json.load(open(hf_hub_download(a , a , repo_type="""dataset""" ) , """r""" ) ) snake_case__ = {int(a ): v for k, v in idalabel.items()} snake_case__ = idalabel snake_case__ = {v: k for k, v in idalabel.items()} snake_case__ = int(deit_name[-6:-4] ) snake_case__ = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): snake_case__ = 192 snake_case__ = 768 snake_case__ = 12 snake_case__ = 3 elif deit_name[9:].startswith("""small""" ): snake_case__ = 384 snake_case__ = 1536 snake_case__ = 12 snake_case__ = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): snake_case__ = 1024 snake_case__ = 4096 snake_case__ = 24 snake_case__ = 16 # load original model from timm snake_case__ = timm.create_model(a , pretrained=a ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ = timm_model.state_dict() snake_case__ = create_rename_keys(a , a ) for src, dest in rename_keys: rename_key(a , a , a ) read_in_q_k_v(a , a , a ) # load HuggingFace model snake_case__ = DeiTForImageClassificationWithTeacher(a ).eval() model.load_state_dict(a ) # Check outputs on an image, prepared by DeiTImageProcessor snake_case__ = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 
224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 snake_case__ = DeiTImageProcessor(size=a , crop_size=config.image_size ) snake_case__ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case__ = encoding["""pixel_values"""] snake_case__ = model(a ) snake_case__ = timm_model(a ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(a , outputs.logits , atol=1e-3 ) Path(a ).mkdir(exist_ok=a ) print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(a ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(a ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) a__ = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
654
0
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class a_ ( lowercase_ ): A = '''''' A = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) A = None # compression type in fsspec. ex: "gzip" A = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self , SCREAMING_SNAKE_CASE = "" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE ) -> Optional[int]: """simple docstring""" super().__init__(self , **UpperCamelCase__ ) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode SCREAMING_SNAKE_CASE_ = fsspec.open( UpperCamelCase__ , mode='rb' , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={ 'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459 'trust_env': True, # Enable reading proxy env variables. **(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed. } , **(target_options or {}) , ) SCREAMING_SNAKE_CASE_ = os.path.basename(self.file.path.split('::' )[0] ) SCREAMING_SNAKE_CASE_ = ( self.compressed_name[: self.compressed_name.rindex('.' )] if '.' 
in self.compressed_name else self.compressed_name ) SCREAMING_SNAKE_CASE_ = None @classmethod def A_( cls , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return super()._strip_protocol(UpperCamelCase__ ).lstrip('/' ) def A_( self ) -> Optional[Any]: """simple docstring""" if self.dir_cache is None: SCREAMING_SNAKE_CASE_ = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name} SCREAMING_SNAKE_CASE_ = {f['name']: f} def A_( self , SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" return self.file.open().read() def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "rb" , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=None , **SCREAMING_SNAKE_CASE , ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ = self._strip_protocol(UpperCamelCase__ ) if mode != "rb": raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' ) return self.file.open() class a_ ( lowercase_ ): A = '''bz2''' A = '''bz2''' A = '''.bz2''' class a_ ( lowercase_ ): A = '''gzip''' A = '''gzip''' A = '''.gz''' class a_ ( lowercase_ ): A = '''lz4''' A = '''lz4''' A = '''.lz4''' class a_ ( lowercase_ ): A = '''xz''' A = '''xz''' A = '''.xz''' class a_ ( lowercase_ ): A = '''zstd''' A = '''zstd''' A = '''.zst''' def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = "rb" , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = DEFAULT_BLOCK_SIZE , **SCREAMING_SNAKE_CASE , ) -> int: """simple docstring""" super().__init__( fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 
'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 SCREAMING_SNAKE_CASE_ = self.file.__enter__ class a_ : def __init__( self , SCREAMING_SNAKE_CASE ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ = file_ def __enter__( self ) -> Optional[int]: """simple docstring""" self._file.__enter__() return self def __exit__( self , *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Union[str, Any]: """simple docstring""" self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__ ) def __iter__( self ) -> Optional[int]: """simple docstring""" return iter(self._file ) def A_( self ) -> List[Any]: """simple docstring""" return next(self._file ) def __getattr__( self , SCREAMING_SNAKE_CASE ) -> Tuple: """simple docstring""" return getattr(self._file , UpperCamelCase__ ) def fixed_enter(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ): return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__ ) ) SCREAMING_SNAKE_CASE_ = fixed_enter
205
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : torch.FloatTensor class _lowerCAmelCase ( lowercase_ , lowercase_ ): """simple docstring""" @register_to_config def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ): '''simple docstring''' super().__init__() snake_case__ = num_attention_heads snake_case__ = attention_head_dim snake_case__ = num_attention_heads * attention_head_dim snake_case__ = additional_embeddings snake_case__ = time_embed_dim or inner_dim snake_case__ = embedding_proj_dim or embedding_dim snake_case__ = clip_embed_dim or embedding_dim snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0) snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__) snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__) if embedding_proj_norm_type is None: snake_case__ = None elif embedding_proj_norm_type == "layer": snake_case__ = nn.LayerNorm(UpperCamelCase__) else: raise 
ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''') snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__) if encoder_hid_proj_type is None: snake_case__ = None elif encoder_hid_proj_type == "linear": snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__) else: raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''') snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__)) if added_emb_type == "prd": snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__)) elif added_emb_type is None: snake_case__ = None else: raise ValueError( F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''') snake_case__ = nn.ModuleList( [ BasicTransformerBlock( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , ) for d in range(UpperCamelCase__) ]) if norm_in_type == "layer": snake_case__ = nn.LayerNorm(UpperCamelCase__) elif norm_in_type is None: snake_case__ = None else: raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''') snake_case__ = nn.LayerNorm(UpperCamelCase__) snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__) snake_case__ = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0) causal_attention_mask.triu_(1) snake_case__ = causal_attention_mask[None, ...] 
self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__) snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__)) snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__)) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = {} def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]): if hasattr(UpperCamelCase__ , """set_processor"""): snake_case__ = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__) return processors for name, module in self.named_children(): fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) return processors def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]): '''simple docstring''' snake_case__ = len(self.attn_processors.keys()) if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count: raise ValueError( F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the''' F''' number of attention layers: {count}. 
Please make sure to pass {count} processor classes.''') def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]): if hasattr(UpperCamelCase__ , """set_processor"""): if not isinstance(UpperCamelCase__ , UpperCamelCase__): module.set_processor(UpperCamelCase__) else: module.set_processor(processor.pop(F'''{name}.processor''')) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__) for name, module in self.named_children(): fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) def __magic_name__ ( self : Dict): '''simple docstring''' self.set_attn_processor(AttnProcessor()) def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ): '''simple docstring''' snake_case__ = hidden_states.shape[0] snake_case__ = timestep if not torch.is_tensor(UpperCamelCase__): snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device) elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0: snake_case__ = timesteps[None].to(hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device) snake_case__ = self.time_proj(UpperCamelCase__) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
snake_case__ = timesteps_projected.to(dtype=self.dtype) snake_case__ = self.time_embedding(UpperCamelCase__) if self.embedding_proj_norm is not None: snake_case__ = self.embedding_proj_norm(UpperCamelCase__) snake_case__ = self.embedding_proj(UpperCamelCase__) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""") snake_case__ = self.proj_in(UpperCamelCase__) snake_case__ = self.positional_embedding.to(hidden_states.dtype) snake_case__ = [] snake_case__ = 0 if encoder_hidden_states is not None: additional_embeds.append(UpperCamelCase__) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape) == 2: snake_case__ = proj_embeddings[:, None, :] if len(hidden_states.shape) == 2: snake_case__ = hidden_states[:, None, :] snake_case__ = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1) additional_embeds.append(UpperCamelCase__) snake_case__ = torch.cat( UpperCamelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: snake_case__ = F.pad( UpperCamelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) snake_case__ = hidden_states + positional_embeddings if attention_mask is not None: snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0 snake_case__ = F.pad(UpperCamelCase__ , (0, 
self.additional_embeddings) , value=0.0) snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype) snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0) if self.norm_in is not None: snake_case__ = self.norm_in(UpperCamelCase__) for block in self.transformer_blocks: snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__) snake_case__ = self.norm_out(UpperCamelCase__) if self.prd_embedding is not None: snake_case__ = hidden_states[:, -1] else: snake_case__ = hidden_states[:, additional_embeddings_len:] snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__) def __magic_name__ ( self : Any , UpperCamelCase__ : Any): '''simple docstring''' snake_case__ = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
654
0
'''simple docstring''' def _A ( A__ , A__ ): """simple docstring""" if density <= 0: raise ValueError('''Impossible fluid density''' ) if bulk_modulus <= 0: raise ValueError('''Impossible bulk modulus''' ) return (bulk_modulus / density) ** 0.5 if __name__ == "__main__": import doctest doctest.testmod()
41
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a__ = ["""gpt2"""] a__ = """gpt2""" if is_tf_available(): class _lowerCAmelCase ( tf.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : int): '''simple docstring''' super().__init__() snake_case__ = tokenizer snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__) snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),)) def __magic_name__ ( self : Tuple , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = self.tokenizer(UpperCamelCase__) snake_case__ = tokenized["""input_ids"""].to_tensor() snake_case__ = tf.cast(input_ids_dense > 0 , tf.intaa) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) snake_case__ = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)["""logits"""] return outputs @require_tf @require_keras_nlp class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __magic_name__ ( self : List[Any]): '''simple docstring''' super().setUp() snake_case__ = [GPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS)] snake_case__ = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers) == len(self.tf_tokenizers) snake_case__ = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare 
Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] snake_case__ = list(zip(self.test_sentences , self.test_sentences[::-1])) def __magic_name__ ( self : Optional[int]): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in self.test_sentences: snake_case__ = tokenizer([test_inputs] , return_tensors="""tf""") snake_case__ = tf_tokenizer([test_inputs]) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors snake_case__ = python_outputs[key].numpy() snake_case__ = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape)) self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa) == tf_outputs_values)) @slow def __magic_name__ ( self : Optional[int]): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: snake_case__ = tf.function(UpperCamelCase__) for test_inputs in self.test_sentences: snake_case__ = tf.constant(UpperCamelCase__) snake_case__ = compiled_tokenizer(UpperCamelCase__) snake_case__ = tf_tokenizer(UpperCamelCase__) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def __magic_name__ ( self : Optional[Any]): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: snake_case__ = ModelToSave(tokenizer=UpperCamelCase__) snake_case__ = tf.convert_to_tensor([self.test_sentences[0]]) snake_case__ = model.serving(UpperCamelCase__) # Build model with some sample inputs with TemporaryDirectory() as tempdir: snake_case__ = Path(UpperCamelCase__) / """saved.model""" tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": model.serving}) snake_case__ = tf.saved_model.load(UpperCamelCase__) snake_case__ = 
loaded_model.signatures["""serving_default"""](UpperCamelCase__)["""output_0"""] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output)) @slow def __magic_name__ ( self : Tuple): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: snake_case__ = tf.convert_to_tensor([self.test_sentences[0]]) snake_case__ = tf_tokenizer(UpperCamelCase__) # Build model with some sample inputs snake_case__ = tf_tokenizer.get_config() snake_case__ = TFGPTaTokenizer.from_config(UpperCamelCase__) snake_case__ = model_from_config(UpperCamelCase__) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key])) @slow def __magic_name__ ( self : Dict): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run snake_case__ = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: snake_case__ = tf.convert_to_tensor([self.test_sentences[0]]) snake_case__ = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__) snake_case__ = out["""input_ids"""].numpy().shape[1] assert out_length == max_length
654
0
"""simple docstring""" import doctest import glob import importlib import inspect import os import re from contextlib import contextmanager from functools import wraps from unittest.mock import patch import numpy as np import pytest from absl.testing import parameterized import datasets from datasets import load_metric from .utils import for_all_test_methods, local, slow # mark all tests as integration _A : int = pytest.mark.integration _A : int = {"""comet"""} _A : str = importlib.util.find_spec("""fairseq""") is not None _A : List[Any] = {"""code_eval"""} _A : List[str] = os.name == """nt""" _A : int = {"""bertscore""", """frugalscore""", """perplexity"""} _A : Dict = importlib.util.find_spec("""transformers""") is not None def __magic_name__ ( __snake_case : Tuple ) -> List[Any]: @wraps(__snake_case ) def wrapper(self : Union[str, Any] , __snake_case : Union[str, Any] ): if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ: self.skipTest("\"test requires Fairseq\"" ) else: test_case(self , __snake_case ) return wrapper def __magic_name__ ( __snake_case : str ) -> Union[str, Any]: @wraps(__snake_case ) def wrapper(self : Union[str, Any] , __snake_case : Union[str, Any] ): if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS: self.skipTest("\"test requires transformers\"" ) else: test_case(self , __snake_case ) return wrapper def __magic_name__ ( __snake_case : Optional[int] ) -> Optional[int]: @wraps(__snake_case ) def wrapper(self : List[Any] , __snake_case : Any ): if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS: self.skipTest("\"test not supported on Windows\"" ) else: test_case(self , __snake_case ) return wrapper def __magic_name__ ( ) -> List[str]: lowercase : int = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )] return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished @parameterized.named_parameters(get_local_metric_names() ) @for_all_test_methods( lowercase_, 
lowercase_, lowercase_ ) @local class a__ ( parameterized.TestCase ): __lowerCAmelCase = {} __lowerCAmelCase = None @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" ) @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" ) def __magic_name__ ( self , _a ): lowercase : Optional[Any] = "[...]" lowercase : int = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , UpperCamelCase__ ) ).module_path ) lowercase : str = datasets.load.import_main_class(metric_module.__name__ , dataset=UpperCamelCase__ ) # check parameters lowercase : int = inspect.signature(metric._compute ).parameters self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs # run doctest with self.patch_intensive_calls(UpperCamelCase__ , metric_module.__name__ ): with self.use_local_metrics(): try: lowercase : List[str] = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__ ) except doctest.UnexpectedException as e: raise e.exc_info[1] # raise the exception that doctest caught self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @slow def __magic_name__ ( self , _a ): lowercase : Any = "[...]" lowercase : str = importlib.import_module( datasets.load.metric_module_factory(os.path.join("metrics" , UpperCamelCase__ ) ).module_path ) # run doctest with self.use_local_metrics(): lowercase : List[str] = doctest.testmod(UpperCamelCase__ , verbose=UpperCamelCase__ , raise_on_error=UpperCamelCase__ ) self.assertEqual(results.failed , 0 ) self.assertGreater(results.attempted , 1 ) @contextmanager def __magic_name__ ( self , _a , _a ): if metric_name in self.INTENSIVE_CALLS_PATCHER: with self.INTENSIVE_CALLS_PATCHER[metric_name](UpperCamelCase__ ): yield else: yield @contextmanager def __magic_name__ ( self ): def load_local_metric(_a , *_a , **_a ): return load_metric(os.path.join("metrics" , UpperCamelCase__ ) , 
*UpperCamelCase__ , **UpperCamelCase__ ) with patch("datasets.load_metric" ) as mock_load_metric: lowercase : int = load_local_metric yield @classmethod def __magic_name__ ( cls , _a ): def wrapper(_a ): lowercase : Optional[Any] = contextmanager(UpperCamelCase__ ) lowercase : Dict = patcher return patcher return wrapper @LocalMetricTest.register_intensive_calls_patcher("bleurt" ) def __magic_name__ ( __snake_case : str ) -> List[str]: import tensorflow.compat.va as tf from bleurt.score import Predictor tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags class a__ ( lowercase_ ): def __magic_name__ ( self , _a ): assert len(input_dict["input_ids"] ) == 2 return np.array([1.0_3, 1.0_4] ) # mock predict_fn which is supposed to do a forward pass with a bleurt model with patch("bleurt.score._create_predictor" ) as mock_create_predictor: lowercase : List[Any] = MockedPredictor() yield @LocalMetricTest.register_intensive_calls_patcher("bertscore" ) def __magic_name__ ( __snake_case : Union[str, Any] ) -> str: import torch def bert_cos_score_idf(__snake_case : Tuple , __snake_case : Tuple , *__snake_case : List[str] , **__snake_case : List[Any] ): return torch.tensor([[1.0, 1.0, 1.0]] * len(__snake_case ) ) # mock get_model which is supposed to do download a bert model # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model with patch("bert_score.scorer.get_model" ), patch( "bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf: lowercase : Any = bert_cos_score_idf yield @LocalMetricTest.register_intensive_calls_patcher("comet" ) def __magic_name__ ( __snake_case : List[Any] ) -> Dict: def load_from_checkpoint(__snake_case : Optional[Any] ): class a__ : def __magic_name__ ( self , _a , *_a , **_a ): assert len(UpperCamelCase__ ) == 2 lowercase : int = [0.1_9, 0.9_2] return scores, sum(UpperCamelCase__ ) / len(UpperCamelCase__ ) return Model() # mock load_from_checkpoint which is supposed to do download a bert model # 
mock load_from_checkpoint which is supposed to do download a bert model with patch("comet.download_model" ) as mock_download_model: lowercase : List[Any] = None with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint: lowercase : Dict = load_from_checkpoint yield def __magic_name__ ( ) -> Any: lowercase : Optional[Any] = load_metric(os.path.join("metrics" , "seqeval" ) ) lowercase : Tuple = "ERROR" lowercase : str = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}""" with pytest.raises(__snake_case , match=re.escape(__snake_case ) ): metric.compute(predictions=[] , references=[] , scheme=__snake_case )
361
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class _lowerCAmelCase(SchedulerCommonTest):
    """Unit tests for ``IPNDMScheduler``.

    NOTE(fix): the obfuscated original was unrunnable — it inherited from the
    undefined name ``lowercase_``, assigned both class attributes to
    ``_lowercase`` while the bodies read ``self.scheduler_classes`` and
    ``self.forward_default_kwargs``, returned the undefined local ``config``
    in the config factory, and defined every method as ``__magic_name__`` so
    later definitions shadowed earlier ones even though the code calls
    ``self.check_over_configs`` / ``self.check_over_forward`` /
    ``self.full_loop``. All names below are restored from those call sites.
    """

    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, with ``kwargs`` overrides applied."""
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Save/reload a scheduler built from ``config`` and verify that
        ``step`` produces identical outputs before and after the round-trip."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        # Fake residual history so the multi-step solver has state to copy.
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            # Step a second time: the solver is stateful, so both schedulers
            # must also agree once their residual history has advanced.
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Save/load behaviour is already exercised by check_over_configs /
        # check_over_forward; the common-test version does not apply here.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Round-trip save/load with the default config and compare ``step``
        outputs of the original and the reloaded scheduler."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run a deterministic two-pass denoising loop and return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # IPNDM iterates the timestep schedule twice (as in the original test).
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """``step`` must preserve the sample shape at consecutive timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            # Second pass: shapes must also hold once the solver is warmed up.
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 2540529) < 10
654
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase): lowerCAmelCase_ = ShapEImgaImgPipeline lowerCAmelCase_ = ['''image'''] lowerCAmelCase_ = ['''image'''] lowerCAmelCase_ = [ '''num_images_per_prompt''', '''num_inference_steps''', '''generator''', '''latents''', '''guidance_scale''', '''frame_size''', '''output_type''', '''return_dict''', ] lowerCAmelCase_ = False @property def UpperCAmelCase_ ( self )-> Union[str, Any]: '''simple docstring''' return 32 @property def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' return 32 @property def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' return self.time_input_dim * 4 @property def UpperCAmelCase_ ( self )-> Union[str, Any]: '''simple docstring''' return 8 @property def UpperCAmelCase_ ( self )-> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) UpperCamelCase = CLIPVisionModel(UpperCamelCase__ ) return model @property def UpperCAmelCase_ ( self )-> Optional[Any]: '''simple docstring''' UpperCamelCase = CLIPImageProcessor( crop_size=224 , do_center_crop=UpperCamelCase__ , do_normalize=UpperCamelCase__ , do_resize=UpperCamelCase__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , 
image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , ) return image_processor @property def UpperCAmelCase_ ( self )-> Union[str, Any]: '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } UpperCamelCase = PriorTransformer(**UpperCamelCase__ ) return model @property def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } UpperCamelCase = ShapERenderer(**UpperCamelCase__ ) return model def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' UpperCamelCase = self.dummy_prior UpperCamelCase = self.dummy_image_encoder UpperCamelCase = self.dummy_image_processor UpperCamelCase = self.dummy_renderer UpperCamelCase = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=1024 , prediction_type='sample' , use_karras_sigmas=UpperCamelCase__ , clip_sample=UpperCamelCase__ , clip_sample_range=1.0 , ) UpperCamelCase = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def UpperCAmelCase_ ( self , A_ , A_=0 )-> int: '''simple docstring''' UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ ) if str(UpperCamelCase__ ).startswith('mps' ): UpperCamelCase = 
torch.manual_seed(UpperCamelCase__ ) else: UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) UpperCamelCase = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' UpperCamelCase = 'cpu' UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**UpperCamelCase__ ) UpperCamelCase = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) ) UpperCamelCase = output.images[0] UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) UpperCamelCase = np.array( [ 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, 0.00_039_216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def UpperCAmelCase_ ( self )-> List[Any]: '''simple docstring''' self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' UpperCamelCase = torch_device == 'cpu' UpperCamelCase = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=UpperCamelCase__ , relax_max_difference=UpperCamelCase__ , ) def UpperCAmelCase_ ( self )-> int: '''simple docstring''' UpperCamelCase = self.get_dummy_components() UpperCamelCase = self.pipeline_class(**UpperCamelCase__ ) UpperCamelCase = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = 1 UpperCamelCase = 2 UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ ) for key in inputs.keys(): if key in self.batch_params: UpperCamelCase = batch_size * [inputs[key]] UpperCamelCase = pipe(**UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu 
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase): def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' UpperCamelCase = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) UpperCamelCase = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) UpperCamelCase = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) UpperCamelCase = pipe.to(UpperCamelCase__ ) pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(0 ) UpperCamelCase = pipe( UpperCamelCase__ , generator=UpperCamelCase__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
3
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : Optional[Any] = ( '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.''' '''It takes two arguments named `image` which should be the original image, and `label` which should be a text ''' '''describing the elements what should be identified in the segmentation mask. 
The tool returns the mask.''' ) _lowercase : Dict = '''CIDAS/clipseg-rd64-refined''' _lowercase : List[Any] = '''image_segmenter''' _lowercase : Tuple = CLIPSegForImageSegmentation _lowercase : str = ['''image''', '''text'''] _lowercase : Dict = ['''image'''] def __init__( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any]): '''simple docstring''' requires_backends(self , ["""vision"""]) super().__init__(*UpperCamelCase__ , **UpperCamelCase__) def __magic_name__ ( self : str , UpperCamelCase__ : "Image" , UpperCamelCase__ : str): '''simple docstring''' return self.pre_processor(text=[label] , images=[image] , padding=UpperCamelCase__ , return_tensors="""pt""") def __magic_name__ ( self : Any , UpperCamelCase__ : Optional[Any]): '''simple docstring''' with torch.no_grad(): snake_case__ = self.model(**UpperCamelCase__).logits return logits def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = outputs.cpu().detach().numpy() snake_case__ = 0 snake_case__ = 1 return Image.fromarray((array * 2_5_5).astype(np.uinta))
654
0
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py UpperCamelCase__ = '''.''' # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) UpperCamelCase__ = [ '''Assert''', '''AssignVariableOp''', '''EmptyTensorList''', '''MergeV2Checkpoints''', '''ReadVariableOp''', '''ResourceGather''', '''RestoreV2''', '''SaveV2''', '''ShardedFilename''', '''StatefulPartitionedCall''', '''StaticRegexFullMatch''', '''VarHandleOp''', ] def UpperCAmelCase ( snake_case : Any , snake_case : List[str] , snake_case : List[Any] ): _lowerCAmelCase:Optional[int] = SavedModel() _lowerCAmelCase:str = [] with open(os.path.join(snake_case , '''utils''' , '''tf_ops''' , '''onnx.json''' ) ) as f: _lowerCAmelCase:List[str] = json.load(snake_case )['''opsets'''] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(snake_case )] ) with open(snake_case , '''rb''' ) as f: saved_model.ParseFromString(f.read() ) _lowerCAmelCase:List[str] = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want _lowerCAmelCase:List[str] = sorted(snake_case ) _lowerCAmelCase:Dict = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(snake_case ) if strict and len(snake_case ) > 0: raise Exception(F'Found the following incompatible ops for the opset {opset}:\n' + incompatible_ops ) elif 
len(snake_case ) > 0: print(F'Found the following incompatible ops for the opset {opset}:' ) print(*snake_case , sep='''\n''' ) else: print(F'The saved model {saved_model_path} can properly be converted with ONNX.' ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''') parser.add_argument( '''--opset''', default=1_2, type=int, help='''The ONNX opset against which the model has to be tested.''' ) parser.add_argument( '''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.''' ) parser.add_argument( '''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)''' ) UpperCamelCase__ = parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
227
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ): '''simple docstring''' snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size snake_case__ = apply_ocr def __magic_name__ ( self : Optional[Any]): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCAmelCase ( lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = LayoutLMvaImageProcessingTester(self) @property def __magic_name__ ( self : Tuple): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__ ( self : List[Any]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""")) 
self.assertTrue(hasattr(UpperCamelCase__ , """size""")) self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr""")) def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8}) snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2}) def __magic_name__ ( self : List[str]): '''simple docstring''' pass def __magic_name__ ( self : List[str]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""") self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , UpperCamelCase__) self.assertIsInstance(encoding.boxes , UpperCamelCase__) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : List[Any]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__) for image in image_inputs: 
self.assertIsInstance(UpperCamelCase__ , np.ndarray) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""") snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""") snake_case__ = 
image_processing(UpperCamelCase__ , return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4)) self.assertEqual(len(encoding.words) , len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", 
"""Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 
1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 
5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCamelCase__) self.assertListEqual(encoding.boxes , UpperCamelCase__) # with apply_OCR = False snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__) snake_case__ = image_processing(UpperCamelCase__ , 
return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
654
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class _A ( lowercase_ ): snake_case__ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
359
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __init__( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = params snake_case__ = np.array(UpperCamelCase__) snake_case__ = np.array([len(UpperCamelCase__) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Dict , UpperCamelCase__ : Any): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Union[str, Any]): '''simple docstring''' return len(self.lengths) def __magic_name__ ( self : str): '''simple docstring''' assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.params.max_model_input_size snake_case__ = self.lengths > max_len logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''') def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple): return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)] snake_case__ = [] snake_case__ = [] if self.params.mlm: snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: snake_case__ = [] for sub_s in divide_chunks(seq_ , max_len - 2): if sub_s[0] != cls_id: snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__) if sub_s[-1] != 
sep_id: snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__) assert len(UpperCamelCase__) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(UpperCamelCase__) new_tok_ids.extend(UpperCamelCase__) new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs]) snake_case__ = np.array(UpperCamelCase__) snake_case__ = np.array(UpperCamelCase__) def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = len(self) snake_case__ = self.lengths > 1_1 snake_case__ = self.token_ids[indices] snake_case__ = self.lengths[indices] snake_case__ = len(self) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''') def __magic_name__ ( self : List[str]): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: snake_case__ = self.params.special_tok_ids["""unk_token"""] snake_case__ = len(self) snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) snake_case__ = (unk_occs / self.lengths) < 0.5 snake_case__ = self.token_ids[indices] snake_case__ = self.lengths[indices] snake_case__ = len(self) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''') def __magic_name__ ( self : Optional[Any]): '''simple docstring''' if not self.params.is_master: return logger.info(F'''{len(self)} sequences''') # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int]): '''simple docstring''' snake_case__ = [t[0] for t in batch] snake_case__ = [t[1] for t in batch] assert len(UpperCamelCase__) == 
len(UpperCamelCase__) # Max for paddings snake_case__ = max(UpperCamelCase__) # Pad token ids if self.params.mlm: snake_case__ = self.params.special_tok_ids["""pad_token"""] else: snake_case__ = self.params.special_tok_ids["""unk_token"""] snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids] assert len(tk_) == len(UpperCamelCase__) assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_) snake_case__ = torch.tensor(tk_) # (bs, max_seq_len_) snake_case__ = torch.tensor(UpperCamelCase__) # (bs) return tk_t, lg_t
654
0
'''simple docstring''' def _UpperCamelCase ( __UpperCamelCase = 1_00 ) -> Optional[int]: lowerCamelCase_ = 0 lowerCamelCase_ = 0 for i in range(1 ,n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(f'''{solution() = }''')
42
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def _UpperCAmelCase ( a : str ): if "model" in orig_key: snake_case__ = orig_key.replace("""model.""" , """""" ) if "norm1" in orig_key: snake_case__ = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" ) if "norm2" in orig_key: snake_case__ = orig_key.replace("""norm2""" , """output.LayerNorm""" ) if "norm" in orig_key: snake_case__ = orig_key.replace("""norm""" , """LayerNorm""" ) if "transformer" in orig_key: snake_case__ = orig_key.split(""".""" )[0].split("""_""" )[-1] snake_case__ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' ) if "mha.attn" in orig_key: snake_case__ = orig_key.replace("""mha.attn""" , """attention.self""" ) if "mha" in orig_key: snake_case__ = orig_key.replace("""mha""" , """attention""" ) if "W_q" in orig_key: snake_case__ = orig_key.replace("""W_q""" , """self.query""" ) if "W_k" in orig_key: snake_case__ = orig_key.replace("""W_k""" , """self.key""" ) if "W_v" in orig_key: snake_case__ = orig_key.replace("""W_v""" , """self.value""" ) if "ff1" in orig_key: snake_case__ = orig_key.replace("""ff1""" , """intermediate.dense""" ) if "ff2" in orig_key: snake_case__ = orig_key.replace("""ff2""" , """output.dense""" ) if "ff" in orig_key: snake_case__ = orig_key.replace("""ff""" , """output.dense""" ) if "mlm_class" in orig_key: snake_case__ = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" ) if "mlm" in orig_key: snake_case__ = orig_key.replace("""mlm""" , """cls.predictions.transform""" ) if "cls" not in orig_key: snake_case__ = """yoso.""" + orig_key return orig_key def _UpperCAmelCase ( a : Tuple , a : Dict ): for key in orig_state_dict.copy().keys(): snake_case__ = orig_state_dict.pop(a ) if ("pooler" in key) or ("sen_class" in key): continue else: snake_case__ = val snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""] snake_case__ = torch.arange(a ).expand((1, -1) ) + 2 
return orig_state_dict def _UpperCAmelCase ( a : int , a : List[Any] , a : List[Any] ): snake_case__ = torch.load(a , map_location="""cpu""" )["""model_state_dict"""] snake_case__ = YosoConfig.from_json_file(a ) snake_case__ = YosoForMaskedLM(a ) snake_case__ = convert_checkpoint_helper(config.max_position_embeddings , a ) print(model.load_state_dict(a ) ) model.eval() model.save_pretrained(a ) print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for YOSO model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a__ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
654
0
from transformers import BertTokenizerFast from .custom_tokenization import CustomTokenizer class lowerCamelCase__ ( lowercase_): """simple docstring""" _A = CustomTokenizer pass
623
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a single compressed file as a filesystem
    containing exactly one (uncompressed) file."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        """
        Args:
            fo: path/URL of the compressed file.
            target_protocol: protocol of the underlying filesystem holding ``fo``.
            target_options: options forwarded to the underlying filesystem.
        """
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # the archive exposes exactly one entry: the uncompressed file
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        # whole-file read; `path` is ignored because there is only one member
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    """Read contents of a BZ2 file as a filesystem with one file inside."""

    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a GZIP file as a filesystem with one file inside."""

    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    """Read contents of an LZ4 file as a filesystem with one file inside."""

    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    """Read contents of an XZ file as a filesystem with one file inside."""

    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    """Read contents of a Zstandard file as a filesystem with one file inside."""

    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Delegating proxy so the context manager returns a writable object."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
654
0
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) HerBERT tokenizer with BERT-style special tokens:
    ``<s> A </s>`` for one sequence and ``<s> A </s> B </s>`` for a pair."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: ``<s> X </s>`` or ``<s> A </s> B </s>``."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: all 0 for the first sequence, all 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
92
def _UpperCAmelCase ( a : int ): if number < 0: raise ValueError("""number must not be negative""" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
654
0
'''simple docstring''' # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union __lowerCamelCase : Any = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$') @total_ordering @dataclass class UpperCAmelCase : """simple docstring""" lowerCAmelCase_ = 42 lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None lowerCAmelCase_ = None def UpperCamelCase__ ( self : List[Any] ) -> List[str]: _UpperCamelCase , _UpperCamelCase , _UpperCamelCase =_str_to_version_tuple(self.version_str ) def __repr__( self : List[str] ) -> List[str]: return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}''' @property def UpperCamelCase__ ( self : Optional[Any] ) -> Any: return self.major, self.minor, self.patch def UpperCamelCase__ ( self : Any , UpperCamelCase__ : Optional[int] ) -> Any: if isinstance(UpperCamelCase__ , UpperCamelCase__ ): return Version(UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): return other raise TypeError(F'''{other} (type {type(UpperCamelCase__ )}) cannot be compared to version.''' ) def __eq__( self : List[Any] , UpperCamelCase__ : Optional[int] ) -> int: try: _UpperCamelCase =self._validate_operand(UpperCamelCase__ ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : List[Any] , UpperCamelCase__ : Dict ) -> Any: _UpperCamelCase =self._validate_operand(UpperCamelCase__ ) return self.tuple < other.tuple def __hash__( self : Any ) -> Union[str, Any]: return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def UpperCamelCase__ ( cls : Any , UpperCamelCase__ : int ) -> Any: _UpperCamelCase ={f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def UpperCamelCase__ ( self : List[str] ) -> str: return self.version_str def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCamelCase 
=_VERSION_REG.match(__SCREAMING_SNAKE_CASE ) if not res: raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' ) return tuple(int(__SCREAMING_SNAKE_CASE ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] ) def _a (__SCREAMING_SNAKE_CASE ): """simple docstring""" return ".".join(str(__SCREAMING_SNAKE_CASE ) for v in version_tuple )
404
class _lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = size snake_case__ = [0] * size snake_case__ = [0] * size @staticmethod def __magic_name__ ( UpperCamelCase__ : int): '''simple docstring''' return index | (index + 1) @staticmethod def __magic_name__ ( UpperCamelCase__ : int): '''simple docstring''' return (index & (index + 1)) - 1 def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = value while index < self.size: snake_case__ = self.get_prev(UpperCamelCase__) + 1 if current_left_border == index: snake_case__ = value else: snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) snake_case__ = self.get_next(UpperCamelCase__) def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int): '''simple docstring''' right -= 1 # Because of right is exclusive snake_case__ = 0 while left <= right: snake_case__ = self.get_prev(UpperCamelCase__) if left <= current_left: snake_case__ = max(UpperCamelCase__ , self.tree[right]) snake_case__ = current_left else: snake_case__ = max(UpperCamelCase__ , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
654
0
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/beit-base-patch16-224-pt22k": (
        "https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
    ),
    # See all BEiT models at https://huggingface.co/models?filter=beit
}


class BeitConfig(PretrainedConfig):
    """Configuration for BEiT models (backbone + optional semantic-segmentation heads).

    Defaults reproduce the base architecture; all arguments are stored verbatim
    on the instance.  NOTE(review): list defaults (``out_indices``,
    ``pool_scales``) are shared mutable defaults — kept for backward
    compatibility with existing configs.
    """

    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class BeitOnnxConfig(OnnxConfig):
    """ONNX export configuration for BEiT."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # single image input in NCHW layout
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
647
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _lowerCAmelCase : """simple docstring""" _lowercase : List[str] = PegasusConfig _lowercase : Union[str, Any] = {} _lowercase : Tuple = '''gelu''' def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=3_7 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=4_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , ): '''simple docstring''' snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = eos_token_id snake_case__ = pad_token_id snake_case__ = bos_token_id def __magic_name__ ( self : Optional[Any]): '''simple docstring''' snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) snake_case__ = 
tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) snake_case__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) return config, inputs_dict def __magic_name__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder() snake_case__ = inputs_dict["""input_ids"""] snake_case__ = input_ids[:1, :] snake_case__ = inputs_dict["""attention_mask"""][:1, :] snake_case__ = inputs_dict["""head_mask"""] snake_case__ = 1 # first forward pass snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__) snake_case__ , snake_case__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size) snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1) snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1) snake_case__ = model(UpperCamelCase__ , 
attention_mask=UpperCamelCase__)[0] snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1])) snake_case__ = output_from_no_past[:, -3:, random_slice_idx] snake_case__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3) def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : List[str] , a : str=None , a : int=None , a : int=None , a : int=None , a : Optional[int]=None , ): if attention_mask is None: snake_case__ = tf.cast(tf.math.not_equal(a , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: snake_case__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else () _lowercase : List[Any] = ( { '''conversational''': 
TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : Optional[int] = True _lowercase : Dict = False _lowercase : Any = False def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = TFPegasusModelTester(self) snake_case__ = ConfigTester(self , config_class=UpperCamelCase__) def __magic_name__ ( self : List[Any]): '''simple docstring''' self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__) @require_sentencepiece @require_tokenizers @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _lowercase : List[str] = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] _lowercase : str = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers _lowercase : int = '''google/pegasus-xsum''' @cached_property def __magic_name__ ( self : Dict): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name) @cached_property def __magic_name__ ( self : int): '''simple docstring''' snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]): '''simple docstring''' snake_case__ = self.translate_src_text(**UpperCamelCase__) assert self.expected_text == generated_words def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]): '''simple docstring''' snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""") snake_case__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , ) snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__) return generated_words @slow def __magic_name__ ( self : List[str]): '''simple docstring''' self._assert_generated_batch_equal_expected()
654
0
# Canonical per-pipeline parameter sets used by the common pipeline tests:
# *_PARAMS are the call arguments a pipeline is expected to accept,
# *_BATCH_PARAMS are the arguments that may be batched.

TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
205
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import regex

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy


logger = logging.get_logger(__name__)

# NOTE(review): the original chunk bound the logger and all three dicts below to
# one name (`a__`), so only the last binding survived and the class body's
# references to `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_LYRIC_TOKENS_SIZES` were unresolved.  Names restored to match the
# references inside the class.
VOCAB_FILES_NAMES = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}

PRETRAINED_VOCAB_FILES_MAP = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    """jukebox""": 5_1_2,
}


class _lowerCAmelCase(PreTrainedTokenizer):
    """Tokenizer for the Jukebox model: maps (artist, genres, lyrics) triples to
    id sequences using three JSON vocabularies.

    Lyrics are tokenized character-by-character; artists and genres are looked
    up whole (after lower-casing for v3, or `_normalize(...)+'.v2'` for v2).
    """

    # Standard PreTrainedTokenizer class attributes; the original bound all four
    # to `_lowercase`, clobbering each other.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],  # mutable default kept for backward compat — never mutated here
        max_n_lyric_tokens=5_1_2,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        """Load the three vocab files and build encoder/decoder maps.

        The original signature reused one mangled name for every parameter
        (a SyntaxError); names restored to match the body's usage.
        """
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="""utf-8""") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="""utf-8""") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="""utf-8""") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            oov = oov.replace(R"""\-'""", R"""\-+'""")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        """Total number of known artists + genres + lyric characters."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # NOTE(review): dict() with three positional mapping arguments raises
        # TypeError at call time; kept as in the original — confirm intended
        # semantics (likely a merged or keyed dict) before fixing.
        return dict(self.artists_encoder, self.genres_encoder, self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Map artist/genre/lyric tokens to ids; pad each genre list to n_genres with -1."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # Only the first (full) lyric stream is encoded; the other two priors get empty lists.
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Lyrics are tokenized character by character."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Normalize the triple then split the lyrics into characters."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False):
        """Normalize artists/genres per prior version and strip out-of-vocab lyric chars."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + """.v2"""
                genres[idx] = [
                    self._normalize(genre) + """.v2""" for genre in genres[idx].split("""_""")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            vocab = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = """"""
        else:
            self.out_of_vocab = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("""\\""", """\n""")
        lyrics = self.out_of_vocab.sub("""""", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strip accents (combining marks) from a piece of text."""
        text = unicodedata.normalize("""NFD""", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str):
        """Lower-case and collapse anything outside [a-z0-9.] to single underscores."""
        accepted = (
            [chr(i) for i in range(ord("""a"""), ord("""z""") + 1)]
            + [chr(i) for i in range(ord("""A"""), ord("""Z""") + 1)]
            + [chr(i) for i in range(ord("""0"""), ord("""9""") + 1)]
            + ["""."""]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(R"""_+""")
        text = """""".join([c if c in accepted else """_""" for c in text.lower()])
        text = pattern.sub("""_""", text).strip("""_""")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)

    def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False):
        """Convert `inputs` to the requested tensor framework (tf/pt/jax/np)."""
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        """Tokenize one (artist, genres, lyrics) triple into per-prior input tensors."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write the three encoder dicts back out as JSON files in `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(artists_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(genres_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(lyrics_file, """w""", encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Inverse of `_convert_token_to_id` for a single example."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
654
0
'''simple docstring''' def _A ( A__ , A__ , A__ , A__ , A__ , A__ ): """simple docstring""" if index == r: for j in range(A__ ): print(data[j] , end=''' ''' ) print(''' ''' ) return # When no more elements are there to put in data[] if i >= n: return # current is included, put next at next location __lowercase = arr[i] combination_util(A__ , A__ , A__ , index + 1 , A__ , i + 1 ) # current is excluded, replace it with # next (Note that i+1 is passed, but # index is not changed) combination_util(A__ , A__ , A__ , A__ , A__ , i + 1 ) # The main function that prints all combinations # of size r in arr[] of size n. This function # mainly uses combinationUtil() def _A ( A__ , A__ , A__ ): """simple docstring""" __lowercase = [0] * r # Print all combination using temporary array 'data[]' combination_util(A__ , A__ , A__ , 0 , A__ , 0 ) if __name__ == "__main__": # Driver code to check the function above lowerCAmelCase__ = [10, 20, 30, 40, 50] print_combination(arr, len(arr), 3) # This code is contributed by Ambuj sahu
41
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class ResizeShortestEdge:
    """Resize each image so its shorter edge lands in `short_edge_length`,
    capping the longer edge at `max_size`.

    The original chunk named both classes in this module `_lowerCAmelCase`, so
    this one was clobbered and the reference below was a NameError; the name is
    restored to match its use in the preprocessor's `__init__`.
    """

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        # short_edge_length is a (min, max) pair sampled per image.
        self.interp_method = """bilinear"""
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        """Resize a list of HWC images (uint8 ndarray via PIL, else via torch interpolate)."""
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:  # original had undefined `np.uinta`
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False).squeeze(0)
            img_augs.append(img)
        return img_augs


class _lowerCAmelCase:
    """Image preprocessor (resize → normalize → pad) driven by a detectron-style cfg.

    NOTE(review): this appears to be the lxmert `Preprocess` helper; the
    obfuscated class name is kept so any external references keep working.
    """

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Zero-pad every image to the max H/W in the batch; return stacked batch + sizes."""
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        """Tensorize, resize, normalize and pad `images`; return (batch, sizes, scales_yx)."""
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    """Scale xyxy boxes in place by per-image (y, x) factors.

    The original named both module functions `_UpperCAmelCase`, so this one was
    clobbered; restored under a distinct private name.
    """
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _UpperCAmelCase(tensor, box_size: Tuple[int, int]):
    """Clamp xyxy boxes in place to the image bounds (h, w).

    Original parameter list used the duplicate name `a` twice (a SyntaxError);
    parameter names restored from the body's usage.
    """
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
654
0
"""simple docstring""" from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class a__ ( lowercase_ ): __lowerCAmelCase = 42 class a__ ( lowercase_, lowercase_ ): @register_to_config def __init__( self , _a = 32 , _a = 64 , _a = 20 , _a = 768 , _a=77 , _a=4 , _a = 0.0 , _a = "silu" , _a = None , _a = None , _a = "linear" , _a = "prd" , _a = None , _a = None , _a = None , ): super().__init__() lowercase : Tuple = num_attention_heads lowercase : int = attention_head_dim lowercase : Union[str, Any] = num_attention_heads * attention_head_dim lowercase : Optional[Any] = additional_embeddings lowercase : Any = time_embed_dim or inner_dim lowercase : int = embedding_proj_dim or embedding_dim lowercase : Optional[int] = clip_embed_dim or embedding_dim lowercase : int = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0 ) lowercase : Dict = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__ ) lowercase : List[Any] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) if embedding_proj_norm_type is None: lowercase : List[str] = None elif embedding_proj_norm_type == "layer": lowercase : Dict = nn.LayerNorm(UpperCamelCase__ ) else: raise ValueError(f"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" ) lowercase : Union[str, Any] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) if encoder_hid_proj_type is None: lowercase : Optional[Any] = None elif encoder_hid_proj_type == "linear": lowercase : Tuple = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) else: raise ValueError(f"""unsupported encoder_hid_proj_type: 
{encoder_hid_proj_type}""" ) lowercase : str = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__ ) ) if added_emb_type == "prd": lowercase : Tuple = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__ ) ) elif added_emb_type is None: lowercase : List[Any] = None else: raise ValueError( f"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.""" ) lowercase : Union[str, Any] = nn.ModuleList( [ BasicTransformerBlock( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="gelu" , attention_bias=UpperCamelCase__ , ) for d in range(UpperCamelCase__ ) ] ) if norm_in_type == "layer": lowercase : Optional[int] = nn.LayerNorm(UpperCamelCase__ ) elif norm_in_type is None: lowercase : int = None else: raise ValueError(f"""Unsupported norm_in_type: {norm_in_type}.""" ) lowercase : Any = nn.LayerNorm(UpperCamelCase__ ) lowercase : int = nn.Linear(UpperCamelCase__ , UpperCamelCase__ ) lowercase : Any = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10_000.0 ) causal_attention_mask.triu_(1 ) lowercase : int = causal_attention_mask[None, ...] 
self.register_buffer("causal_attention_mask" , UpperCamelCase__ , persistent=UpperCamelCase__ ) lowercase : int = nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) ) lowercase : List[str] = nn.Parameter(torch.zeros(1 , UpperCamelCase__ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __magic_name__ ( self ): lowercase : List[Any] = {} def fn_recursive_add_processors(_a , _a , _a ): if hasattr(UpperCamelCase__ , "set_processor" ): lowercase : List[Any] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"""{name}.{sub_name}""" , UpperCamelCase__ , UpperCamelCase__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return processors def __magic_name__ ( self , _a ): lowercase : Optional[int] = len(self.attn_processors.keys() ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != count: raise ValueError( f"""A dict of processors was passed, but the number of processors {len(UpperCamelCase__ )} does not match the""" f""" number of attention layers: {count}. 
Please make sure to pass {count} processor classes.""" ) def fn_recursive_attn_processor(_a , _a , _a ): if hasattr(UpperCamelCase__ , "set_processor" ): if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): module.set_processor(UpperCamelCase__ ) else: module.set_processor(processor.pop(f"""{name}.processor""" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"""{name}.{sub_name}""" , UpperCamelCase__ , UpperCamelCase__ ) for name, module in self.named_children(): fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) def __magic_name__ ( self ): self.set_attn_processor(AttnProcessor() ) def __magic_name__ ( self , _a , _a , _a , _a = None , _a = None , _a = True , ): lowercase : Dict = hidden_states.shape[0] lowercase : Union[str, Any] = timestep if not torch.is_tensor(UpperCamelCase__ ): lowercase : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(UpperCamelCase__ ) and len(timesteps.shape ) == 0: lowercase : Optional[int] = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase : Optional[int] = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device ) lowercase : Any = self.time_proj(UpperCamelCase__ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
lowercase : Optional[int] = timesteps_projected.to(dtype=self.dtype ) lowercase : Any = self.time_embedding(UpperCamelCase__ ) if self.embedding_proj_norm is not None: lowercase : Optional[int] = self.embedding_proj_norm(UpperCamelCase__ ) lowercase : Optional[Any] = self.embedding_proj(UpperCamelCase__ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: lowercase : List[Any] = self.encoder_hidden_states_proj(UpperCamelCase__ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" ) lowercase : Tuple = self.proj_in(UpperCamelCase__ ) lowercase : int = self.positional_embedding.to(hidden_states.dtype ) lowercase : List[str] = [] lowercase : List[Any] = 0 if encoder_hidden_states is not None: additional_embeds.append(UpperCamelCase__ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: lowercase : Tuple = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: lowercase : int = hidden_states[:, None, :] lowercase : Any = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: lowercase : Optional[Any] = self.prd_embedding.to(hidden_states.dtype ).expand(UpperCamelCase__ , -1 , -1 ) additional_embeds.append(UpperCamelCase__ ) lowercase : int = torch.cat( UpperCamelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens lowercase : Union[str, Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: lowercase : Union[str, Any] = F.pad( UpperCamelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) lowercase : List[Any] = hidden_states + positional_embeddings if 
attention_mask is not None: lowercase : Optional[int] = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0 lowercase : Any = F.pad(UpperCamelCase__ , (0, self.additional_embeddings) , value=0.0 ) lowercase : List[str] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) lowercase : Dict = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: lowercase : List[str] = self.norm_in(UpperCamelCase__ ) for block in self.transformer_blocks: lowercase : Any = block(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) lowercase : str = self.norm_out(UpperCamelCase__ ) if self.prd_embedding is not None: lowercase : Optional[Any] = hidden_states[:, -1] else: lowercase : List[Any] = hidden_states[:, additional_embeddings_len:] lowercase : Dict = self.proj_to_clip_embeddings(UpperCamelCase__ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__ ) def __magic_name__ ( self , _a ): lowercase : Any = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
361
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): originally bound to the same name as the logger above (`a__`),
# which clobbered it; restored under a distinct name.
WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class _lowerCAmelCase(PretrainedConfig):
    """Configuration class for WavLM models.

    The original `__init__` reused one mangled name for every parameter (a
    SyntaxError: duplicate argument names) while the body read the real
    attribute names; the signature is restored to match the body, keeping the
    original default values in their original order.
    """

    model_type = """wavlm"""

    def __init__(
        self,
        vocab_size=3_2,
        hidden_size=7_6_8,
        num_hidden_layers=1_2,
        num_attention_heads=1_2,
        intermediate_size=3_0_7_2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(1_0, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=1_2_8,
        num_conv_pos_embedding_groups=1_6,
        num_buckets=3_2_0,
        max_bucket_distance=8_0_0,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=1_0,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=1_0,
        num_codevectors_per_group=3_2_0,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=1_0_0,
        codevector_dim=2_5_6,
        proj_codevector_dim=2_5_6,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=2_5_6,
        tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=5_1_2,
        num_ctc_classes=8_0,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Product of the conv strides: audio samples consumed per output frame."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
654
0
'''simple docstring''' def A_( A : str): if n_term == "": return [] UpperCamelCase = [] for temp in range(int(A)): series.append(f'''1/{temp + 1}''' if series else '1') return series if __name__ == "__main__": lowerCAmelCase : List[str] = input('Enter the last number (nth term) of the Harmonic Series') print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n') print(harmonic_series(nth_term))
3
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class _lowerCAmelCase(DiffusionPipeline):
    """Unconditional image generation via score-based generative modelling
    through SDEs (variance-exploding flavour).

    NOTE(review): the base class was the undefined name ``lowercase_``; it is
    restored to ``DiffusionPipeline`` (imported above, and required for
    ``register_modules`` / ``progress_bar`` / ``numpy_to_pil`` used below).
    The ``__call__``/``__init__`` signatures had every parameter mangled to
    the same name ``UpperCamelCase__`` (a SyntaxError); the names below are
    the ones the body actually reads.
    """

    # Components registered in __init__.
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        """Register the denoising U-Net and the SDE-VE scheduler."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2_0_0_0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Run the reverse-SDE sampling loop and return generated images.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of prediction steps of the sampler.
            generator: RNG(s) used for the initial noise and the Langevin
                correction noise (determinism).
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: when False, return a plain ``(sample,)`` tuple.
        """
        img_size = self.unet.config.sample_size
        # The pipeline always samples square, 3-channel images.
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # Correction step (Langevin dynamics).
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # Prediction step.
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Final image is the clamped posterior mean from the last step.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
654
0
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def UpperCAmelCase ( snake_case : List[str] ): _lowerCAmelCase:Optional[Any] = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2] _lowerCAmelCase:List[Any] = True if '''large''' in model_name or '''huge''' in model_name else False _lowerCAmelCase:int = True if '''large''' in model_name or '''huge''' in model_name else False _lowerCAmelCase:Any = True if '''large''' in model_name or '''huge''' in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: _lowerCAmelCase:List[str] = [3, 3, 3, 3] _lowerCAmelCase:Tuple = [5, 5, 5, 5] elif "fl4" in model_name: _lowerCAmelCase:List[str] = [4, 4, 4, 4] _lowerCAmelCase:Union[str, Any] = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: _lowerCAmelCase:Any = [3, 3, 3, 3] if "lrf" in model_name: _lowerCAmelCase:List[str] = [3, 3, 3, 3] else: _lowerCAmelCase:Dict = [2, 2, 2, 2] if "tiny" in model_name: _lowerCAmelCase:Union[str, Any] = 96 elif "small" in model_name: _lowerCAmelCase:Union[str, Any] = 96 elif "base" in model_name: _lowerCAmelCase:Union[str, Any] = 128 elif "large" in model_name: _lowerCAmelCase:int = 192 elif "xlarge" in model_name: _lowerCAmelCase:Union[str, Any] = 256 elif "huge" in model_name: _lowerCAmelCase:List[Any] = 352 # set label information _lowerCAmelCase:int = '''huggingface/label-files''' if "large" in model_name or "huge" in model_name: _lowerCAmelCase:int = '''imagenet-22k-id2label.json''' else: _lowerCAmelCase:Union[str, Any] = '''imagenet-1k-id2label.json''' _lowerCAmelCase:Any = json.load(open(hf_hub_download(snake_case 
, snake_case , repo_type='''dataset''' ) , '''r''' ) ) _lowerCAmelCase:int = {int(snake_case ): v for k, v in idalabel.items()} _lowerCAmelCase:Any = {v: k for k, v in idalabel.items()} _lowerCAmelCase:Any = FocalNetConfig( embed_dim=snake_case , depths=snake_case , focal_levels=snake_case , focal_windows=snake_case , use_conv_embed=snake_case , idalabel=snake_case , labelaid=snake_case , use_post_layernorm=snake_case , use_layerscale=snake_case , ) return config def UpperCAmelCase ( snake_case : Union[str, Any] ): if "patch_embed.proj" in name: _lowerCAmelCase:Optional[int] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "patch_embed.norm" in name: _lowerCAmelCase:Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' ) if "layers" in name: _lowerCAmelCase:Union[str, Any] = '''encoder.''' + name if "encoder.layers" in name: _lowerCAmelCase:List[str] = name.replace('''encoder.layers''' , '''encoder.stages''' ) if "downsample.proj" in name: _lowerCAmelCase:Optional[Any] = name.replace('''downsample.proj''' , '''downsample.projection''' ) if "blocks" in name: _lowerCAmelCase:Dict = name.replace('''blocks''' , '''layers''' ) if "modulation.f.weight" in name or "modulation.f.bias" in name: _lowerCAmelCase:int = name.replace('''modulation.f''' , '''modulation.projection_in''' ) if "modulation.h.weight" in name or "modulation.h.bias" in name: _lowerCAmelCase:Union[str, Any] = name.replace('''modulation.h''' , '''modulation.projection_context''' ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: _lowerCAmelCase:Dict = name.replace('''modulation.proj''' , '''modulation.projection_out''' ) if name == "norm.weight": _lowerCAmelCase:Any = '''layernorm.weight''' if name == "norm.bias": _lowerCAmelCase:List[str] = '''layernorm.bias''' if "head" in name: _lowerCAmelCase:Tuple = name.replace('''head''' , '''classifier''' ) else: _lowerCAmelCase:Union[str, Any] = '''focalnet.''' + name return name def 
UpperCAmelCase ( snake_case : Optional[Any] , snake_case : str , snake_case : List[str]=False ): # fmt: off _lowerCAmelCase:Union[str, Any] = { '''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''', '''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''', '''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''', '''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''', '''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''', '''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''', '''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''', '''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''', '''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''', '''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''', } # fmt: on _lowerCAmelCase:List[str] = model_name_to_url[model_name] print('''Checkpoint URL: ''' , snake_case ) _lowerCAmelCase:Any = torch.hub.load_state_dict_from_url(snake_case , map_location='''cpu''' )['''model'''] # rename keys for key in state_dict.copy().keys(): _lowerCAmelCase:str = state_dict.pop(snake_case ) _lowerCAmelCase:Dict = val _lowerCAmelCase:List[Any] = get_focalnet_config(snake_case ) _lowerCAmelCase:Any = FocalNetForImageClassification(snake_case ) model.eval() # load state dict 
model.load_state_dict(snake_case ) # verify conversion _lowerCAmelCase:Optional[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _lowerCAmelCase:Optional[int] = BitImageProcessor( do_resize=snake_case , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=snake_case , crop_size=224 , do_normalize=snake_case , image_mean=snake_case , image_std=snake_case , ) _lowerCAmelCase:Optional[int] = Image.open(requests.get(snake_case , stream=snake_case ).raw ) _lowerCAmelCase:Tuple = processor(images=snake_case , return_tensors='''pt''' ) _lowerCAmelCase:Any = transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ), ] ) _lowerCAmelCase:Optional[int] = image_transforms(snake_case ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , snake_case , atol=1e-4 ) _lowerCAmelCase:List[str] = model(**snake_case ) _lowerCAmelCase:Tuple = outputs.logits.argmax(-1 ).item() print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] ) print('''First values of logits:''' , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": _lowerCAmelCase:Union[str, Any] = torch.tensor([0.21_66, -0.43_68, 0.21_91] ) elif model_name == "focalnet-tiny-lrf": _lowerCAmelCase:str = torch.tensor([1.16_69, 0.01_25, -0.16_95] ) elif model_name == "focalnet-small": _lowerCAmelCase:Tuple = torch.tensor([0.49_17, -0.04_30, 0.13_41] ) elif model_name == "focalnet-small-lrf": _lowerCAmelCase:Tuple = torch.tensor([-0.25_88, -0.53_42, -0.23_31] ) elif model_name == "focalnet-base": _lowerCAmelCase:List[str] = torch.tensor([-0.16_55, -0.40_90, -0.17_30] ) elif model_name == "focalnet-base-lrf": _lowerCAmelCase:List[str] = torch.tensor([0.53_06, -0.04_83, -0.39_28] ) assert torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not 
None: print(F'Saving model and processor of {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(snake_case ) processor.save_pretrained(snake_case ) if push_to_hub: print(F'Pushing model and processor of {model_name} to the hub...' ) model.push_to_hub(F'{model_name}' ) processor.push_to_hub(F'{model_name}' ) if __name__ == "__main__": UpperCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) UpperCamelCase__ = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
227
# NOTE(review): fast-test class for diffusers' IF inpainting super-resolution
# pipeline.  The chunk was ingested with its original newlines collapsed into
# spaces, so the code is preserved byte-for-byte below.  It mixes in
# PipelineTesterMixin and IFPipelineTesterMixin, builds dummy 16x16 / 32x32
# float tensors as image / original_image / mask_image inputs, and delegates
# the actual checks (xformers attention, save/load, fp16 save/load, attention
# slicing, batch-single-identical) to mixin helpers.  Runs are skipped on MPS
# and, where noted, off-CUDA.
import random import unittest import torch from diffusers import IFInpaintingSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : Optional[int] = IFInpaintingSuperResolutionPipeline _lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''} _lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} ) _lowercase : int = PipelineTesterMixin.required_optional_params - {'''latents'''} def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' return self._get_superresolution_dummy_components() def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=0): '''simple docstring''' if str(UpperCamelCase__).startswith("""mps"""): snake_case__ = torch.manual_seed(UpperCamelCase__) else: snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__) snake_case__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__) snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__) snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__) snake_case__ = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """original_image""": original_image, """mask_image""": mask_image, """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs @unittest.skipIf( torch_device != """cuda""" 
or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def __magic_name__ ( self : Dict): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3) def __magic_name__ ( self : int): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""") def __magic_name__ ( self : Optional[Any]): '''simple docstring''' super().test_save_load_floataa(expected_max_diff=1E-1) def __magic_name__ ( self : List[Any]): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1E-2) def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' self._test_save_load_local() def __magic_name__ ( self : str): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
654
0
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
359
"""Project Euler 145: count "reversible" numbers below 10**max_power.

A number n is reversible when n + reverse(n) consists entirely of odd digits;
leading zeros are not allowed.  The count is computed digit-pair by digit-pair
instead of enumerating every n.
"""

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list, length: int
) -> int:
    """Count reversible numbers of ``length`` digits with the outer digit
    pairs already fixed in ``digits``.

    Args:
        remaining_length: how many middle digit positions are still free.
        remainder: carry accumulated from the already-fixed outer pairs.
        digits: scratch buffer of the number's digits (mutated in place).
        length: total number of digits.

    Returns:
        The count of valid completions.
    """
    if remaining_length == 0:
        # All positions fixed: reject leading/trailing zero, then verify every
        # pair-sum digit (propagating carries inward-out) is odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit contributes 2*digit to its own column,
        # so the carry into it must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    # General case: choose the next outer pair; their column sum must be odd,
    # so the second digit's parity is forced by the first plus the carry.
    result = 0
    for digita in range(10):
        digits[(length + remaining_length) // 2 - 1] = digita
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digita + digitb) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below ``10**max_power``."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
654
0
# NOTE(review): TensorFlow DPR model tests (context/question encoders and
# reader) plus a slow integration test against real pretrained weights.  The
# chunk was ingested with its original newlines collapsed into spaces, so the
# code is preserved byte-for-byte below.  Structure: a model-tester helper
# class that builds a small BertConfig-backed DPRConfig and dummy inputs; a
# ModelTesterMixin test class; and an integration test comparing a question
# embedding slice against hard-coded values.
# NOTE(review): class names were mangled by obfuscation — the tester class is
# declared as `UpperCAmelCase` but later instantiated as `TFDPRModelTester`,
# and both test classes share the name `UpperCAmelCase` — restoring them
# requires the upstream file; flagged here, not fixed.
'''simple docstring''' from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, BertConfig, DPRConfig, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) class UpperCAmelCase : '''simple docstring''' def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=16 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=0 , ) -> Any: '''simple docstring''' lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_mask lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = vocab_size lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = intermediate_size lowerCamelCase_ = hidden_act lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size 
lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = scope lowerCamelCase_ = projection_dim def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = None if self.use_input_mask: # follow test_modeling_tf_ctrl.py lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = BertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , ) lowerCamelCase_ = DPRConfig(projection_dim=self.projection_dim , **config.to_dict() ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = TFDPRContextEncoder(config=UpperCamelCase__ ) lowerCamelCase_ 
= model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any: '''simple docstring''' lowerCamelCase_ = TFDPRQuestionEncoder(config=UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.projection_dim or self.hidden_size) ) def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]: '''simple docstring''' lowerCamelCase_ = TFDPRReader(config=UpperCamelCase__ ) lowerCamelCase_ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.relevance_logits.shape , (self.batch_size,) ) def UpperCamelCase( self ) -> Union[str, Any]: '''simple docstring''' lowerCamelCase_ = self.prepare_config_and_inputs() ( ( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) ,( lowerCamelCase_ ) , ) = config_and_inputs lowerCamelCase_ = {'input_ids': input_ids} return config, inputs_dict @require_tf class UpperCAmelCase ( lowercase_ , 
lowercase_ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE_ = ( ( TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE_ = {'''feature-extraction''': TFDPRQuestionEncoder} if is_tf_available() else {} SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def UpperCamelCase( self ) -> str: '''simple docstring''' lowerCamelCase_ = TFDPRModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 ) def UpperCamelCase( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_context_encoder(*UpperCamelCase__ ) def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_question_encoder(*UpperCamelCase__ ) def UpperCamelCase( self ) -> Any: '''simple docstring''' lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_dpr_reader(*UpperCamelCase__ ) @slow def UpperCamelCase( self ) -> str: '''simple docstring''' for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRContextEncoder.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) for model_name in 
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = TFDPRReader.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_tf class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' ) lowerCamelCase_ = tf.constant( [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]] ) # [CLS] hello, is my dog cute? [SEP] lowerCamelCase_ = model(UpperCamelCase__ )[0] # embedding shape = (1, 768) # compare the actual values for a slice. lowerCamelCase_ = tf.constant( [ [ 0.03_236_253, 0.12_753_335, 0.16_818_509, 0.00_279_786, 0.3_896_933, 0.24_264_945, 0.2_178_971, -0.02_335_227, -0.08_481_959, -0.14_324_117, ] ] ) self.assertTrue(numpy.allclose(output[:, :10].numpy() , expected_slice.numpy() , atol=1E-4 ) )
42
# NOTE(review): `transformers` agent tool that wraps the NLLB-200 distilled
# 600M model for text translation.  The chunk was ingested with its original
# newlines collapsed into spaces, so the code is preserved byte-for-byte
# below.  Structure: a LANGUAGE_CODES table mapping plain-English language
# names to NLLB/FLORES-200 codes, and a PipelineTool subclass whose encode
# step validates both languages and builds tokenizer inputs, whose forward
# step calls `generate`, and whose decode step detokenizes the output.
# NOTE(review): the base class name was mangled to the undefined `lowercase_`
# (presumably `PipelineTool`, which is imported but otherwise unused — TODO
# confirm against the upstream file); flagged here, not fixed.
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool a__ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", """Catalan""": """cat_Latn""", 
"""Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""", """Northern 
Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""", """Samoan""": 
"""smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : List[str] = '''facebook/nllb-200-distilled-600M''' _lowercase : List[Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. 
Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.''' ) _lowercase : Optional[int] = '''translator''' _lowercase : Optional[Any] = AutoTokenizer _lowercase : Dict = AutoModelForSeqaSeqLM _lowercase : List[str] = LANGUAGE_CODES _lowercase : Optional[Any] = ['''text''', '''text''', '''text'''] _lowercase : Tuple = ['''text'''] def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''') if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''') snake_case__ = self.lang_to_code[src_lang] snake_case__ = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( UpperCamelCase__ , return_tensors="""pt""" , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__) def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict): '''simple docstring''' return self.model.generate(**UpperCamelCase__) def __magic_name__ ( self : List[str] , UpperCamelCase__ : Dict): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase__)
654
0
import os

import pytest

from transformers.dynamic_module_utils import get_imports

# Each constant below is a small Python snippet whose only *resolvable*
# top-level dependency is ``os``; ``bar``/``baz`` imports are wrapped in
# try/except blocks that ``get_imports`` must treat as optional.

TOP_LEVEL_IMPORT = """
import os
"""

IMPORT_IN_FUNCTION = """
def foo():
    import os
    return False
"""

DEEPLY_NESTED_IMPORT = """
def foo():
    def bar():
        if True:
            import os
        return False
    return bar()
"""

TOP_LEVEL_TRY_IMPORT = """
import os

try:
    import bar
except ImportError:
    raise ValueError()
"""

TRY_IMPORT_IN_FUNCTION = """
import os

def foo():
    try:
        import bar
    except ImportError:
        raise ValueError()
"""

MULTIPLE_EXCEPTS_IMPORT = """
import os

try:
    import bar
except (ImportError, AttributeError):
    raise ValueError()
"""

EXCEPT_AS_IMPORT = """
import os

try:
    import bar
except ImportError as e:
    raise ValueError()
"""

GENERIC_EXCEPT_IMPORT = """
import os

try:
    import bar
except:
    raise ValueError()
"""

MULTILINE_TRY_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    raise ValueError()
"""

MULTILINE_BOTH_IMPORT = """
import os

try:
    import bar
    import baz
except ImportError:
    x = 1
    raise ValueError()
"""

CASES = [
    TOP_LEVEL_IMPORT,
    IMPORT_IN_FUNCTION,
    DEEPLY_NESTED_IMPORT,
    TOP_LEVEL_TRY_IMPORT,
    GENERIC_EXCEPT_IMPORT,
    MULTILINE_TRY_IMPORT,
    MULTILINE_BOTH_IMPORT,
    MULTIPLE_EXCEPTS_IMPORT,
    EXCEPT_AS_IMPORT,
    TRY_IMPORT_IN_FUNCTION,
]


@pytest.mark.parametrize("case", CASES)
def __lowercase(case, tmp_path):
    """Write ``case`` to a temp file and check get_imports finds only ``os``.

    ``tmp_path`` is pytest's per-test temporary directory fixture.
    """
    tmp_file_path = os.path.join(tmp_path, "test_file.py")
    with open(tmp_file_path, "w") as _tmp_file:
        _tmp_file.write(case)
    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"]
623
import gc import importlib.metadata import tempfile import unittest from packaging import version from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, pipeline, ) from transformers.testing_utils import ( is_torch_available, require_accelerate, require_bitsandbytes, require_torch, require_torch_gpu, require_torch_multi_gpu, slow, ) def _UpperCAmelCase ( a : Optional[int] ): if model.config.model_type == "gpt2": return model.transformer.h[0].mlp.c_fc return model.transformer.h[0].mlp.dense_ah_to_h if is_torch_available(): import torch import torch.nn as nn class _lowerCAmelCase ( nn.Module ): """simple docstring""" def __init__( self : int , UpperCamelCase__ : nn.Module , UpperCamelCase__ : int): '''simple docstring''' super().__init__() snake_case__ = module snake_case__ = nn.Sequential( nn.Linear(module.in_features , UpperCamelCase__ , bias=UpperCamelCase__) , nn.Linear(UpperCamelCase__ , module.out_features , bias=UpperCamelCase__) , ) snake_case__ = (2.0 / (5 * min(module.in_features , module.out_features))) ** 0.5 nn.init.normal_(self.adapter[0].weight , std=UpperCamelCase__) nn.init.zeros_(self.adapter[1].weight) self.adapter.to(module.weight.device) def __magic_name__ ( self : Tuple , UpperCamelCase__ : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str): '''simple docstring''' return self.module(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__) + self.adapter(UpperCamelCase__) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _lowercase : Dict = '''bigscience/bloom-1b7''' # Constant values _lowercase : Any = 2.109_6595_5269_2574 _lowercase : Tuple = '''Hello my name is''' _lowercase : List[Any] = set() EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. 
I''' ) EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''' ) EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''' ) _lowercase : List[str] = 10 def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = AutoTokenizer.from_pretrained(self.model_name) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : str): '''simple docstring''' super().setUp() # Models and tokenizer snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , torch_dtype=torch.floataa , device_map="""auto""") snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") def __magic_name__ ( self : Tuple): '''simple docstring''' del self.model_fpaa del self.model_abit gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = self.model_abit.config self.assertTrue(hasattr(UpperCamelCase__ , """quantization_config""")) snake_case__ = config.to_dict() snake_case__ = config.to_diff_dict() snake_case__ = config.to_json_string() def __magic_name__ ( self : Dict): '''simple docstring''' from bitsandbytes.nn import Paramsabit snake_case__ = self.model_fpaa.get_memory_footprint() snake_case__ = self.model_abit.get_memory_footprint() self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE) snake_case__ = get_some_linear_layer(self.model_abit) self.assertTrue(linear.weight.__class__ == Paramsabit) def __magic_name__ ( self : Optional[int]): '''simple docstring''' from transformers import TaPreTrainedModel self.model_fpaa.get_memory_footprint() self.model_abit.get_memory_footprint() for name, module in self.model_abit.named_modules(): if isinstance(UpperCamelCase__ , torch.nn.Linear): if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules: # 4-bit parameters are packed in uint8 variables self.assertTrue(module.weight.dtype == 
torch.uinta) def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = BitsAndBytesConfig() snake_case__ = True snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = model_abit_from_config.generate( input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) def __magic_name__ ( self : Optional[int]): '''simple docstring''' with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname: self.model_abit.save_pretrained(UpperCamelCase__) def __magic_name__ ( self : List[str]): '''simple docstring''' snake_case__ = BitsAndBytesConfig() with self.assertRaises(UpperCamelCase__): snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , quantization_config=UpperCamelCase__ , load_in_abit=UpperCamelCase__ , device_map="""auto""" , bnb_abit_quant_type="""nf4""" , ) def __magic_name__ ( self : List[Any]): '''simple docstring''' with self.assertRaises(UpperCamelCase__): # Tries with `str` self.model_abit.to("""cpu""") with self.assertRaises(UpperCamelCase__): # Tries with a `dtype`` self.model_abit.to(torch.floataa) with self.assertRaises(UpperCamelCase__): # Tries with a `device` self.model_abit.to(torch.device("""cuda:0""")) with self.assertRaises(UpperCamelCase__): # Tries with a `device` self.model_abit.float() with self.assertRaises(UpperCamelCase__): # Tries with a `device` 
self.model_abit.half() # Test if we did not break anything snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") snake_case__ = self.model_fpaa.to(torch.floataa) snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) # Check this does not throw an error snake_case__ = self.model_fpaa.to("""cpu""") # Check this does not throw an error snake_case__ = self.model_fpaa.half() # Check this does not throw an error snake_case__ = self.model_fpaa.float() def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""" , load_in_abit=UpperCamelCase__ , device_map="""auto""") self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa) @require_bitsandbytes @require_accelerate @require_torch @require_torch_gpu @slow class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" @classmethod def __magic_name__ ( cls : Optional[Any]): '''simple docstring''' snake_case__ = """t5-small""" snake_case__ = """google/flan-t5-small""" # flan-t5 uses dense-act instead of dense-relu-dense snake_case__ = AutoTokenizer.from_pretrained(cls.model_name) snake_case__ = """Translate in German: Hello, my dog is cute""" def __magic_name__ ( self : Optional[int]): '''simple docstring''' gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Any): '''simple docstring''' from transformers import TaForConditionalGeneration snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules snake_case__ = None # test with `t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) # test with `flan-t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , 
load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) snake_case__ = modules def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' import bitsandbytes as bnb from transformers import TaForConditionalGeneration # test with `t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # there was a bug with decoders - this test checks that it is fixed self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit)) snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) # test with `flan-t5-small` snake_case__ = TaForConditionalGeneration.from_pretrained( self.dense_act_model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""").to(0) snake_case__ = model.generate(**UpperCamelCase__) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : int): '''simple docstring''' super().setUp() # model_name snake_case__ = """bigscience/bloom-560m""" snake_case__ = """t5-small""" # Different types of model snake_case__ = AutoModel.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # Sequence classification model snake_case__ = AutoModelForSequenceClassification.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # CausalLM model snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") # Seq2seq model snake_case__ = AutoModelForSeqaSeqLM.from_pretrained( self.seq_to_seq_name , load_in_abit=UpperCamelCase__ , device_map="""auto""") def __magic_name__ ( self : List[str]): '''simple docstring''' del 
self.base_model del self.sequence_model del self.model_abit del self.seq_to_seq_model gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' from bitsandbytes.nn import Paramsabit self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit) # Other heads should be nn.Parameter self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter) self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Tuple): '''simple docstring''' super().setUp() def __magic_name__ ( self : int): '''simple docstring''' del self.pipe gc.collect() torch.cuda.empty_cache() def __magic_name__ ( self : Tuple): '''simple docstring''' snake_case__ = pipeline( """text-generation""" , model=self.model_name , model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , ) # Real second forward pass snake_case__ = self.pipe(self.input_text) self.assertIn(pipeline_output[0]["""generated_text"""] , self.EXPECTED_OUTPUTS) @require_torch_multi_gpu class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Union[str, Any]): '''simple docstring''' super().setUp() def __magic_name__ ( self : int): '''simple docstring''' snake_case__ = AutoModelForCausalLM.from_pretrained( self.model_name , load_in_abit=UpperCamelCase__ , device_map="""balanced""") # Check correct device map self.assertEqual(set(model_parallel.hf_device_map.values()) , {0, 1}) # Check that inference pass works on the model snake_case__ = self.tokenizer(self.input_text , return_tensors="""pt""") # Second real batch snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0) , max_new_tokens=1_0) 
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=UpperCamelCase__) , self.EXPECTED_OUTPUTS) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = """facebook/opt-350m""" super().setUp() def __magic_name__ ( self : Any): '''simple docstring''' if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""): return # Step 1: freeze all parameters snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=UpperCamelCase__) self.assertEqual(set(model.hf_device_map.values()) , {torch.cuda.current_device()}) for param in model.parameters(): snake_case__ = False # freeze the model - train adapters later if param.ndim == 1: # cast the small parameters (e.g. layernorm) to fp32 for stability snake_case__ = param.data.to(torch.floataa) # Step 2: add adapters for _, module in model.named_modules(): if "OPTAttention" in repr(type(UpperCamelCase__)): snake_case__ = LoRALayer(module.q_proj , rank=1_6) snake_case__ = LoRALayer(module.k_proj , rank=1_6) snake_case__ = LoRALayer(module.v_proj , rank=1_6) # Step 3: dummy batch snake_case__ = self.tokenizer("""Test batch """ , return_tensors="""pt""").to(0) # Step 4: Check if the gradient is not None with torch.cuda.amp.autocast(): snake_case__ = model.forward(**UpperCamelCase__) out.logits.norm().backward() for module in model.modules(): if isinstance(UpperCamelCase__ , UpperCamelCase__): self.assertTrue(module.adapter[1].weight.grad is not None) self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0) elif isinstance(UpperCamelCase__ , nn.Embedding): self.assertTrue(module.weight.grad is None) class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : List[Any] = '''gpt2-xl''' _lowercase : Any = 3.3191_8548_5415_2187
654
0
"""Bit-manipulation check for whether an integer is a power of two."""


# Exported explicitly so ``from module import *`` picks up the
# underscore-prefixed name.
__all__ = ["_lowerCAmelCase"]


def _lowerCAmelCase(__magic_name__: int) -> bool:
    """Return ``True`` when the input is 0 or a positive power of two.

    A positive power of two has exactly one bit set, so clearing its lowest
    set bit via ``n & (n - 1)`` yields zero.  Note that 0 also passes the
    test (``0 & -1 == 0``).

    Args:
        __magic_name__: Non-negative integer to test.

    Raises:
        ValueError: If the input is negative.
    """
    if __magic_name__ < 0:
        raise ValueError('number must not be negative')
    # ``&`` binds tighter than ``==`` in Python, so this evaluates as
    # ``(n & (n - 1)) == 0``.  The original annotated the return as
    # ``Tuple`` without importing it, which raised NameError at import
    # time; the function actually returns a bool.
    return __magic_name__ & (__magic_name__ - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
92
import glob
import os
import random
from string import ascii_lowercase, digits

import cva

# Input/output locations; fill these in before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Flip every image in the dataset and write the flipped copies plus
    updated YOLO-format label files to OUTPUT_DIR.

    The anonymized source had all four functions collapsed onto one name;
    the call sites here preserve the original names.
    """
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("""Processing...""")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(""".""", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cva.imwrite(f"/{file_root}.jpg", image, [cva.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", """w""") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str):
    """Collect image paths and their bounding boxes.

    Reads every ``*.txt`` label file in ``label_dir`` (YOLO format:
    ``class x_center y_center width height`` per line) and pairs it with
    the like-named ``.jpg`` in ``img_dir``.  Images with no boxes are
    skipped.

    Returns:
        (img_paths, labels): parallel lists of paths and box lists.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, """*.txt""")):
        label_name = label_file.split(os.sep)[-1].rsplit(""".""", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(""" """)
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1):
    """Flip each image and mirror its annotations accordingly.

    For a horizontal flip (flip_type == 1) the x-center becomes ``1 - x``;
    for a vertical flip (flip_type == 0) the y-center becomes ``1 - y``.
    Box sizes and class ids are unchanged (coordinates are normalized).

    Returns:
        (new_imgs_list, new_annos_lists, path_list)
    """
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cva.imread(path)
        if flip_type == 1:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cva.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase-alphanumeric string of ``number_char`` chars."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("""DONE ✅""")
654
0
"""Pegasus-style sentence splitting: one sentence per output line."""
import re

from filelock import FileLock

try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    # Guard the one-off tokenizer-model download with a file lock so that
    # concurrent worker processes do not race on it.
    with FileLock('.lock') as lock:
        nltk.download('punkt', quiet=True)


def _a(__SCREAMING_SNAKE_CASE):
    """Return the input text with each sentence placed on its own line.

    Any literal Pegasus newline marker ``<n>`` is stripped first.

    Raises:
        AssertionError: If nltk is not installed.
    """
    # Bug fix: ``re.sub`` returns a new string — the original discarded the
    # result, so ``<n>`` markers were never actually removed.
    text = re.sub('''<n>''', '''''', __SCREAMING_SNAKE_CASE)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text))
404
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 500_000

# The anonymized source unpacked both halves of os.path.split into a single
# name and then read the undefined originals; restore the two distinct names.
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    # Deliberately shadows the builtin: the call sites in this benchmark use
    # the bare name ``map`` for the timed Dataset.map wrapper.
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    # Timed wrapper around Dataset.filter; duration comes from @get_duration.
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """Run a battery of Dataset.map/filter calls under several output formats
    and write a JSON report next to this script.

    NOTE(review): the anonymized source discards every timing result; the
    upstream benchmark presumably stored each one in ``times`` under a
    descriptive key — confirm against the original before relying on the
    written report, which currently only records the example count.
    """
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features(
            {"""text""": datasets.Value("""string"""), """numbers""": datasets.Value("""float32""")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, """dataset.arrow"""), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["""text"""])

        _ = map(dataset)
        _ = map(dataset, batched=True)
        _ = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="""numpy"""):
            _ = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="""pandas"""):
            _ = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="""torch""", columns="""numbers"""):
            _ = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="""tensorflow""", columns="""numbers"""):
            _ = map(dataset, function=lambda x: None, batched=True)
        _ = map(dataset, function=tokenize, batched=True)
        _ = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, """wb""") as f:
        f.write(json.dumps(times).encode("""utf-8"""))


if __name__ == "__main__":
    # useful to run the profiler
    benchmark_map_filter()
654
0
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, is_batched, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __snake_case : Dict =logging.get_logger(__name__) class lowerCamelCase__ ( lowercase_): '''simple docstring''' snake_case_ =['''pixel_values'''] def __init__(self ,__lowerCamelCase = True ,__lowerCamelCase = None ,__lowerCamelCase = PILImageResampling.BICUBIC ,__lowerCamelCase = True ,__lowerCamelCase = True ,__lowerCamelCase = 1 / 2_55 ,__lowerCamelCase = None ,__lowerCamelCase = True ,__lowerCamelCase = None ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> Union[str, Any]: """simple docstring""" super().__init__(**UpperCamelCase__ ) lowerCAmelCase__ : Optional[Any] = size if size is not None else {'''height''': 2_24, '''width''': 2_24} lowerCAmelCase__ : List[str] = get_size_dict(UpperCamelCase__ ) lowerCAmelCase__ : Tuple = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} lowerCAmelCase__ : int = get_size_dict(UpperCamelCase__ ,default_to_square=UpperCamelCase__ ,param_name='''crop_size''' ) lowerCAmelCase__ : Dict = do_resize lowerCAmelCase__ : int = do_rescale lowerCAmelCase__ : Dict = do_normalize lowerCAmelCase__ : str = do_center_crop lowerCAmelCase__ : Optional[Any] = crop_size lowerCAmelCase__ : Any = size lowerCAmelCase__ : Union[str, Any] = resample lowerCAmelCase__ : Dict = rescale_factor lowerCAmelCase__ : List[str] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN lowerCAmelCase__ : Any = image_std if image_std is not None else IMAGENET_DEFAULT_STD def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase 
,__lowerCamelCase = PILImageResampling.BILINEAR ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> int: """simple docstring""" lowerCAmelCase__ : str = get_size_dict(UpperCamelCase__ ) if "shortest_edge" in size: lowerCAmelCase__ : List[str] = get_resize_output_image_size(UpperCamelCase__ ,size=size['''shortest_edge'''] ,default_to_square=UpperCamelCase__ ) # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: lowerCAmelCase__ : List[Any] = (size['''height'''], size['''width''']) else: raise ValueError(f"""Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}""" ) return resize(UpperCamelCase__ ,size=UpperCamelCase__ ,resample=UpperCamelCase__ ,data_format=UpperCamelCase__ ,**UpperCamelCase__ ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> int: """simple docstring""" lowerCAmelCase__ : int = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(UpperCamelCase__ ,size=(size['''height'''], size['''width''']) ,data_format=UpperCamelCase__ ,**UpperCamelCase__ ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None ,**__lowerCamelCase ) -> List[str]: """simple docstring""" return rescale(UpperCamelCase__ ,scale=UpperCamelCase__ ,data_format=UpperCamelCase__ ,**UpperCamelCase__ ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> str: """simple docstring""" return normalize(UpperCamelCase__ ,mean=UpperCamelCase__ ,std=UpperCamelCase__ ,data_format=UpperCamelCase__ ,**UpperCamelCase__ ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = ChannelDimension.FIRST ,**__lowerCamelCase ,) -> Tuple: """simple docstring""" lowerCAmelCase__ : Any = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ : int = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ : int = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ : Union[str, Any] = get_size_dict(UpperCamelCase__ ,param_name='''crop_size''' ,default_to_square=UpperCamelCase__ ) lowerCAmelCase__ : int = resample if resample is not None else self.resample lowerCAmelCase__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ : str = image_mean if image_mean is not None else self.image_mean lowerCAmelCase__ : int = image_std if image_std 
is not None else self.image_std lowerCAmelCase__ : Tuple = size if size is not None else self.size lowerCAmelCase__ : int = get_size_dict(UpperCamelCase__ ) if not is_batched(UpperCamelCase__ ): lowerCAmelCase__ : List[str] = [images] if not valid_images(UpperCamelCase__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. lowerCAmelCase__ : List[str] = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: lowerCAmelCase__ : int = [self.resize(image=UpperCamelCase__ ,size=UpperCamelCase__ ,resample=UpperCamelCase__ ) for image in images] if do_center_crop: lowerCAmelCase__ : str = [self.center_crop(image=UpperCamelCase__ ,size=UpperCamelCase__ ) for image in images] if do_rescale: lowerCAmelCase__ : List[Any] = [self.rescale(image=UpperCamelCase__ ,scale=UpperCamelCase__ ) for image in images] if do_normalize: lowerCAmelCase__ : Dict = [self.normalize(image=UpperCamelCase__ ,mean=UpperCamelCase__ ,std=UpperCamelCase__ ) for image in images] lowerCAmelCase__ : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ ,UpperCamelCase__ ) for image in images] lowerCAmelCase__ : List[Any] = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase__ ,tensor_type=UpperCamelCase__ )
647
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build the (timm_key, hf_key) pairs that map DeiT timm weights onto HF names.

    Args:
        config: DeiTConfig whose ``num_hidden_layers`` drives the per-layer keys.
        base_model: if True, emit keys for a headless backbone (no "deit." prefix,
            layernorm + pooler instead of classification heads).
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate HF query/key/value tensors (in place)."""
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats test image used to sanity-check vision conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy a timm DeiT checkpoint into the HF DeiT architecture and save it.

    Verifies the converted logits against the original timm model before saving.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # model names look like e.g. "vit_deit_base_distilled_patch16_224"
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # defaults of DeiTConfig already match the base variant
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
654
0
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """Print ``*msgs`` while holding an exclusive flock on this script file.

    Solves the interleaved-print problem: concurrent ranks on the same node
    serialize their output through the lock so lines don't get mixed together.
    """
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

# per-process identity tag used in every diagnostic line
gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
205
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer.

    Attributes:
        predicted_image_embedding: the predicted CLIP image embedding conditioned
            on the text embedding input.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer that predicts a CLIP image embedding from a CLIP text embedding."""

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        # fall back to inner_dim / embedding_dim when the optional dims are not given
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        # NOTE(review): the second Timesteps argument (flip_sin_to_cos) was lost in the
        # mangled source; True matches the upstream diffusers implementation — confirm.
        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # additive causal mask: -10000.0 above the diagonal blocks attention to future tokens
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors in the model, indexed by their weight-path name."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s); a dict must name one processor per attention layer."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    # pop so each entry is consumed exactly once
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default AttnProcessor."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Run the prior: embed timestep + projections, attend, and predict the image embedding."""
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            # convert boolean mask to additive mask, pad for the extra tokens, then
            # combine with the causal mask and expand per attention head
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            # the prd token (last position) aggregates the prediction
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Undo the CLIP-stat normalization applied to the prior's latents."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
654
0
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    """Tests for DonutProcessor's XML-tag-sequence -> JSON conversion."""

    def setUp(self):
        # load once per test; network access required on first run
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        """token2json should parse nested <s_*> tags (incl. <sep/>-joined lists) into a dict."""
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
41
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        """Wraps an in-graph tokenizer + LM so the pair can be SavedModel-exported together."""

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs


@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    """Checks the in-graph TFGPT2Tokenizer against the reference Python GPT2Tokenizer."""

    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int32) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
654
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) _A : List[Any] = { """configuration_llama""": ["""LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LlamaConfig"""], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : Any = ["""LlamaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : int = ["""LlamaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A : List[Any] = [ """LlamaForCausalLM""", """LlamaModel""", """LlamaPreTrainedModel""", """LlamaForSequenceClassification""", ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys _A : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
361
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class _lowerCAmelCase(SchedulerCommonTest):
    """Tests for ``IPNDMScheduler``: save/load round-trips, step-output shapes
    and a deterministic full denoising loop.

    NOTE(review): the mangled original named every method ``__magic_name__``
    (so later defs shadowed earlier ones) and both class attributes
    ``_lowercase``, while bodies still called ``self.get_scheduler_config()``,
    ``self.full_loop()``, ``self.check_over_configs`` /
    ``self.check_over_forward`` and read ``self.scheduler_classes`` /
    ``self.forward_default_kwargs``. The canonical names are restored so those
    references resolve; this also fixes the NameError in the config builder
    (``config`` was read but never bound).
    """

    # Read by SchedulerCommonTest helpers.
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, overridable via **kwargs."""
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        """Instantiate with ``config``, save/reload, and assert step() agrees."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                # pick a timestep from the middle of the schedule
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            # step a second time: IPNDM keeps a residual history, so the
            # stateful second step must match too
            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        # Round-trip behaviour is already covered by check_over_configs /
        # check_over_forward above.
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        """Save/reload round-trip with the default config.

        ``forward_kwargs`` is accepted for API symmetry with the other
        scheduler tests (the body uses the class-level forward defaults).
        """
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        """Run the sampling loop twice over the schedule and return the sample."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # IPNDM warms up a residual history, hence the loop runs twice.
        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        """step() must preserve the sample shape at different timesteps."""
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            # second (stateful) step must keep the shape as well
            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop_with_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        # Deterministic regression value from the dummy model/sample; the mean
        # is large because the sample is not clipped in this configuration.
        assert abs(result_mean.item() - 2540529) < 10
654
0
# ---------------------------------------------------------------------------
# NOTE(review): this chunk is a whitespace-collapsed, identifier-mangled dump
# of a diffusers test module for StableDiffusionLDMaDPipeline (LDM3D): one
# fast CPU test class plus @slow and @nightly @require_torch_gpu integration
# classes. It is preserved byte-for-byte below. Defects visible in the text,
# flagged here rather than fixed (restoring it safely requires the upstream
# file):
#   * Original newlines were collapsed, so many statements share one physical
#     line, and inline `#` comments swallow the code that follows them on the
#     same line.
#   * Locals are assigned as `UpperCamelCase` but read back as
#     `UpperCamelCase__`, and method parameters are declared as `A_` while the
#     bodies read `UpperCamelCase__` — NameError if executed as-is.
#   * Every method is named `UpperCAmelCase_` and every class attribute
#     `lowerCAmelCase_`, so later definitions shadow earlier ones within each
#     class and the mixin attribute names (pipeline_class, params, ...) are
#     lost.
#   * Numeric literals were mangled with extra underscores (e.g. 0.70_247);
#     these still parse to the intended values. The expected slices/means look
#     intact — verify against the upstream LDM3D tests before restoring names.
# ---------------------------------------------------------------------------
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDMaDPipeline, UNetaDConditionModel, ) from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( unittest.TestCase): lowerCAmelCase_ = StableDiffusionLDMaDPipeline lowerCAmelCase_ = TEXT_TO_IMAGE_PARAMS lowerCAmelCase_ = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase_ = TEXT_TO_IMAGE_IMAGE_PARAMS def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) UpperCamelCase = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , ) torch.manual_seed(0 ) UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) UpperCamelCase = CLIPTextModel(UpperCamelCase__ ) UpperCamelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) UpperCamelCase = { 'unet': unet, 'scheduler':
scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCAmelCase_ ( self , A_ , A_=0 )-> str: '''simple docstring''' if str(UpperCamelCase__ ).startswith('mps' ): UpperCamelCase = torch.manual_seed(UpperCamelCase__ ) else: UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) UpperCamelCase = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def UpperCAmelCase_ ( self )-> int: '''simple docstring''' UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = StableDiffusionLDMaDPipeline(**UpperCamelCase__ ) UpperCamelCase = ldmad_pipe.to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ ) UpperCamelCase = ldmad_pipe(**UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = output.rgb, output.depth UpperCamelCase = rgb[0, -3:, -3:, -1] UpperCamelCase = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) UpperCamelCase = np.array( [0.37_338_176, 0.70_247, 0.74_203_193, 0.51_643_604, 0.58_256_793, 0.60_932_136, 0.4_181_095, 0.48_355_877, 0.46_535_262] ) UpperCamelCase = np.array([103.46727, 85.812_004, 87.849_236] ) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1e-2 def UpperCAmelCase_ ( self )-> List[Any]: '''simple docstring''' UpperCamelCase = self.get_dummy_components() UpperCamelCase = StableDiffusionLDMaDPipeline(**UpperCamelCase__ ) UpperCamelCase = ldmad_pipe.to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ )
UpperCamelCase = 3 * [inputs['prompt']] # forward UpperCamelCase = ldmad_pipe(**UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = output.rgb, output.depth UpperCamelCase = rgb_slice_a[0, -3:, -3:, -1] UpperCamelCase = depth_slice_a[0, -3:, -1] UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ ) UpperCamelCase = 3 * [inputs.pop('prompt' )] UpperCamelCase = ldmad_pipe.tokenizer( UpperCamelCase__ , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=UpperCamelCase__ , return_tensors='pt' , ) UpperCamelCase = text_inputs['input_ids'].to(UpperCamelCase__ ) UpperCamelCase = ldmad_pipe.text_encoder(UpperCamelCase__ )[0] UpperCamelCase = prompt_embeds # forward UpperCamelCase = ldmad_pipe(**UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = output.rgb, output.depth UpperCamelCase = rgb_slice_a[0, -3:, -3:, -1] UpperCamelCase = depth_slice_a[0, -3:, -1] assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1e-4 assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1e-4 def UpperCAmelCase_ ( self )-> Dict: '''simple docstring''' UpperCamelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator UpperCamelCase = self.get_dummy_components() UpperCamelCase = PNDMScheduler(skip_prk_steps=UpperCamelCase__ ) UpperCamelCase = StableDiffusionLDMaDPipeline(**UpperCamelCase__ ) UpperCamelCase = ldmad_pipe.to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_dummy_inputs(UpperCamelCase__ ) UpperCamelCase = 'french fries' UpperCamelCase = ldmad_pipe(**UpperCamelCase__ , negative_prompt=UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = output.rgb, output.depth UpperCamelCase = rgb[0, -3:, -3:, -1] UpperCamelCase = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) UpperCamelCase = np.array( [0.37_044, 0.71_811_503, 0.7_223_251, 0.48_603_675, 0.5_638_391, 0.6_364_948, 0.42_833_704, 0.4_901_315,
0.47_926_217] ) UpperCamelCase = np.array([107.84738, 84.62_802, 89.962_135] ) assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1e-2 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase): def UpperCAmelCase_ ( self )-> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 )-> Optional[int]: '''simple docstring''' UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) UpperCamelCase = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 64, 64) ) UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) UpperCamelCase = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCAmelCase_ ( self )-> Tuple: '''simple docstring''' UpperCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ) UpperCamelCase = ldmad_pipe.to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_inputs(UpperCamelCase__ ) UpperCamelCase = ldmad_pipe(**UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = output.rgb, output.depth UpperCamelCase = rgb[0, -3:, -3:, -1].flatten() UpperCamelCase = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512) UpperCamelCase = np.array( [0.53_805_465, 0.56_707_305, 0.5_486_515, 0.57_012_236, 0.5_814_511, 0.56_253_487, 0.54_843_014, 0.55_092_263, 0.6_459_706] ) UpperCamelCase = np.array( [0.9_263_781, 0.6_678_672, 0.5_486_515, 0.92_202_145, 0.67_831_135, 0.56_253_487, 0.9_241_694, 0.7_551_478, 0.6_459_706] ) assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3e-3 assert np.abs(depth_slice -
expected_slice_depth ).max() < 3e-3 @nightly @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase): def UpperCAmelCase_ ( self )-> Optional[Any]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase_ ( self , A_ , A_="cpu" , A_=torch.floataa , A_=0 )-> Optional[Any]: '''simple docstring''' UpperCamelCase = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ ) UpperCamelCase = np.random.RandomState(UpperCamelCase__ ).standard_normal((1, 4, 64, 64) ) UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(device=UpperCamelCase__ , dtype=UpperCamelCase__ ) UpperCamelCase = { 'prompt': 'a photograph of an astronaut riding a horse', 'latents': latents, 'generator': generator, 'num_inference_steps': 50, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def UpperCAmelCase_ ( self )-> int: '''simple docstring''' UpperCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_inputs(UpperCamelCase__ ) UpperCamelCase = ldmad_pipe(**UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = output.rgb, output.depth UpperCamelCase = 0.495_586 UpperCamelCase = 0.33_795_515 UpperCamelCase = 112.48518 UpperCamelCase = 98.489_746 assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3 def UpperCAmelCase_ ( self )-> List[str]: '''simple docstring''' UpperCamelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(UpperCamelCase__ ) ldmad_pipe.set_progress_bar_config(disable=UpperCamelCase__ ) UpperCamelCase = self.get_inputs(UpperCamelCase__ ) UpperCamelCase = ldmad_pipe(**UpperCamelCase__ ) UpperCamelCase , UpperCamelCase = output.rgb, output.depth UpperCamelCase = 0.4_194_127 UpperCamelCase =
0.35_375_586 UpperCamelCase = 0.5_638_502 UpperCamelCase = 0.34_686_103 assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512, 1) assert np.abs(expected_rgb_mean - rgb.mean() ) < 1e-3 assert np.abs(expected_rgb_std - rgb.std() ) < 1e-3 assert np.abs(expected_depth_mean - depth.mean() ) < 1e-3 assert np.abs(expected_depth_std - depth.std() ) < 1e-3
3
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class _lowerCAmelCase(PipelineTool):
    """Agent tool producing a binary segmentation mask of *label* in *image*
    via CLIPSeg.

    NOTE(review): restored from the mangled original, which (a) inherited from
    an undefined ``lowercase_`` instead of the imported ``PipelineTool``,
    (b) named all three protocol methods ``__magic_name__`` (shadowing each
    other, and PipelineTool dispatches to ``encode``/``forward``/``decode``),
    (c) used the nonexistent ``np.uinta`` attribute, and (d) had ``decode``'s
    mask thresholding collapsed into plain rebinding of a local — the masked
    assignments into the array are restored below.
    """

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # The tool renders a PIL image, so the vision backend is mandatory.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        """Pack image + label into model inputs (called before forward())."""
        # padding value was destroyed by the mangling (`padding=UpperCamelCase__`);
        # True matches the upstream tool — TODO confirm against transformers.
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        """Run CLIPSeg and return the raw segmentation logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        """Threshold the logits at 0 into a {0, 1} mask and render it as an
        8-bit grayscale PIL image (0 or 255)."""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
654
0
"""simple docstring""" # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImgaImgPipeline, UNetaDConditionModel, ) from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class a__ ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): snake_case__ = StableDiffusionControlNetImgaImgPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({'''control_image'''} ) snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __UpperCamelCase ( self : int) -> Dict: """simple docstring""" torch.manual_seed(0) _lowerCAmelCase:Any = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) torch.manual_seed(0) _lowerCAmelCase:Optional[int] = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 
,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) torch.manual_seed(0) _lowerCAmelCase:Tuple = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=UpperCamelCase__ ,set_alpha_to_one=UpperCamelCase__ ,) torch.manual_seed(0) _lowerCAmelCase:Union[str, Any] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) torch.manual_seed(0) _lowerCAmelCase:Optional[int] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) _lowerCAmelCase:Tuple = CLIPTextModel(UpperCamelCase__) _lowerCAmelCase:str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') _lowerCAmelCase:Optional[Any] = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __UpperCamelCase ( self : int ,a__ : Optional[Any] ,a__ : List[Any]=0) -> Tuple: """simple docstring""" if str(UpperCamelCase__).startswith('''mps'''): _lowerCAmelCase:Union[str, Any] = torch.manual_seed(UpperCamelCase__) else: _lowerCAmelCase:List[Any] = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__) _lowerCAmelCase:Tuple = 2 _lowerCAmelCase:Optional[Any] = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=UpperCamelCase__ ,device=torch.device(UpperCamelCase__) ,) _lowerCAmelCase:Dict = floats_tensor(control_image.shape ,rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__) _lowerCAmelCase:str = 
image.cpu().permute(0 ,2 ,3 ,1)[0] _lowerCAmelCase:List[Any] = Image.fromarray(np.uinta(UpperCamelCase__)).convert('''RGB''').resize((64, 64)) _lowerCAmelCase:int = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def __UpperCamelCase ( self : List[Any]) -> List[str]: """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def __UpperCamelCase ( self : int) -> Any: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3) def __UpperCamelCase ( self : Dict) -> int: """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3) class a__ ( lowercase_ , lowercase_ , unittest.TestCase ): snake_case__ = StableDiffusionControlNetImgaImgPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''} snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS snake_case__ = frozenset([] ) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def __UpperCamelCase ( self : Union[str, Any]) -> Union[str, Any]: """simple docstring""" torch.manual_seed(0) _lowerCAmelCase:Tuple = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') ,cross_attention_dim=32 ,) torch.manual_seed(0) def init_weights(a__ : Tuple): if isinstance(UpperCamelCase__ ,torch.nn.Convad): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) _lowerCAmelCase:Union[str, Any] = ControlNetModel( block_out_channels=(32, 
64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(UpperCamelCase__) torch.manual_seed(0) _lowerCAmelCase:List[Any] = ControlNetModel( block_out_channels=(32, 64) ,layers_per_block=2 ,in_channels=4 ,down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') ,cross_attention_dim=32 ,conditioning_embedding_out_channels=(16, 32) ,) controlneta.controlnet_down_blocks.apply(UpperCamelCase__) torch.manual_seed(0) _lowerCAmelCase:Union[str, Any] = DDIMScheduler( beta_start=0.00085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,clip_sample=UpperCamelCase__ ,set_alpha_to_one=UpperCamelCase__ ,) torch.manual_seed(0) _lowerCAmelCase:List[Any] = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] ,up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] ,latent_channels=4 ,) torch.manual_seed(0) _lowerCAmelCase:List[Any] = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,) _lowerCAmelCase:Optional[Any] = CLIPTextModel(UpperCamelCase__) _lowerCAmelCase:Optional[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''') _lowerCAmelCase:Optional[int] = MultiControlNetModel([controlneta, controlneta]) _lowerCAmelCase:Tuple = { '''unet''': unet, '''controlnet''': controlnet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __UpperCamelCase ( self : int ,a__ : Optional[int] ,a__ : Union[str, Any]=0) -> List[Any]: """simple docstring""" if str(UpperCamelCase__).startswith('''mps'''): _lowerCAmelCase:List[Any] = 
torch.manual_seed(UpperCamelCase__) else: _lowerCAmelCase:List[str] = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__) _lowerCAmelCase:Any = 2 _lowerCAmelCase:Tuple = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=UpperCamelCase__ ,device=torch.device(UpperCamelCase__) ,), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor) ,generator=UpperCamelCase__ ,device=torch.device(UpperCamelCase__) ,), ] _lowerCAmelCase:Optional[Any] = floats_tensor(control_image[0].shape ,rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__) _lowerCAmelCase:Optional[Any] = image.cpu().permute(0 ,2 ,3 ,1)[0] _lowerCAmelCase:Optional[int] = Image.fromarray(np.uinta(UpperCamelCase__)).convert('''RGB''').resize((64, 64)) _lowerCAmelCase:Optional[int] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''image''': image, '''control_image''': control_image, } return inputs def __UpperCamelCase ( self : Optional[int]) -> Dict: """simple docstring""" _lowerCAmelCase:Optional[int] = self.get_dummy_components() _lowerCAmelCase:Union[str, Any] = self.pipeline_class(**UpperCamelCase__) pipe.to(UpperCamelCase__) _lowerCAmelCase:Tuple = 10.0 _lowerCAmelCase:Optional[int] = 4 _lowerCAmelCase:int = self.get_dummy_inputs(UpperCamelCase__) _lowerCAmelCase:Tuple = steps _lowerCAmelCase:List[str] = scale _lowerCAmelCase:Optional[Any] = pipe(**UpperCamelCase__)[0] _lowerCAmelCase:Any = self.get_dummy_inputs(UpperCamelCase__) _lowerCAmelCase:Any = steps _lowerCAmelCase:List[Any] = scale _lowerCAmelCase:Optional[Any] = pipe(**UpperCamelCase__ ,control_guidance_start=0.1 ,control_guidance_end=0.2)[0] _lowerCAmelCase:Union[str, Any] = self.get_dummy_inputs(UpperCamelCase__) _lowerCAmelCase:Dict = steps _lowerCAmelCase:List[Any] = scale 
_lowerCAmelCase:Union[str, Any] = pipe(**UpperCamelCase__ ,control_guidance_start=[0.1, 0.3] ,control_guidance_end=[0.2, 0.7])[0] _lowerCAmelCase:int = self.get_dummy_inputs(UpperCamelCase__) _lowerCAmelCase:Optional[Any] = steps _lowerCAmelCase:Optional[Any] = scale _lowerCAmelCase:List[Any] = pipe(**UpperCamelCase__ ,control_guidance_start=0.4 ,control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_a - output_a)) > 1E-3 assert np.sum(np.abs(output_a - output_a)) > 1E-3 assert np.sum(np.abs(output_a - output_a)) > 1E-3 def __UpperCamelCase ( self : Optional[int]) -> Optional[Any]: """simple docstring""" return self._test_attention_slicing_forward_pass(expected_max_diff=2E-3) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,) def __UpperCamelCase ( self : Tuple) -> Optional[Any]: """simple docstring""" self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2E-3) def __UpperCamelCase ( self : Tuple) -> int: """simple docstring""" self._test_inference_batch_single_identical(expected_max_diff=2E-3) def __UpperCamelCase ( self : Any) -> Optional[Any]: """simple docstring""" _lowerCAmelCase:Any = self.get_dummy_components() _lowerCAmelCase:Optional[int] = self.pipeline_class(**UpperCamelCase__) pipe.to(UpperCamelCase__) pipe.set_progress_bar_config(disable=UpperCamelCase__) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(UpperCamelCase__) except NotImplementedError: pass @slow @require_torch_gpu class a__ ( unittest.TestCase ): def __UpperCamelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase ( self : List[Any]) -> Dict: """simple docstring""" _lowerCAmelCase:Dict = 
ControlNetModel.from_pretrained('''lllyasviel/sd-controlnet-canny''') _lowerCAmelCase:List[str] = StableDiffusionControlNetImgaImgPipeline.from_pretrained( '''runwayml/stable-diffusion-v1-5''' ,safety_checker=UpperCamelCase__ ,controlnet=UpperCamelCase__) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=UpperCamelCase__) _lowerCAmelCase:List[Any] = torch.Generator(device='''cpu''').manual_seed(0) _lowerCAmelCase:str = '''evil space-punk bird''' _lowerCAmelCase:Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png''').resize((512, 512)) _lowerCAmelCase:Any = load_image( '''https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png''').resize((512, 512)) _lowerCAmelCase:Dict = pipe( UpperCamelCase__ ,UpperCamelCase__ ,control_image=UpperCamelCase__ ,generator=UpperCamelCase__ ,output_type='''np''' ,num_inference_steps=50 ,strength=0.6 ,) _lowerCAmelCase:List[Any] = output.images[0] assert image.shape == (512, 512, 3) _lowerCAmelCase:Optional[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy''') assert np.abs(expected_image - image).max() < 9E-2
227
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ): '''simple docstring''' snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size snake_case__ = apply_ocr def __magic_name__ ( self : Optional[Any]): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCAmelCase ( lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = LayoutLMvaImageProcessingTester(self) @property def __magic_name__ ( self : Tuple): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__ ( self : List[Any]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""")) 
self.assertTrue(hasattr(UpperCamelCase__ , """size""")) self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr""")) def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8}) snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2}) def __magic_name__ ( self : List[str]): '''simple docstring''' pass def __magic_name__ ( self : List[str]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""") self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , UpperCamelCase__) self.assertIsInstance(encoding.boxes , UpperCamelCase__) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : List[Any]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__) for image in image_inputs: 
self.assertIsInstance(UpperCamelCase__ , np.ndarray) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""") snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""") snake_case__ = 
image_processing(UpperCamelCase__ , return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4)) self.assertEqual(len(encoding.words) , len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", 
"""Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 
1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 
5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCamelCase__) self.assertListEqual(encoding.boxes , UpperCamelCase__) # with apply_OCR = False snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__) snake_case__ = image_processing(UpperCamelCase__ , 
return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
654
0
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def UpperCAmelCase__ ( lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Any=1e-12 ) -> Optional[Any]: '''simple docstring''' lowercase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(lowerCAmelCase__ , axis=1 ) , a_min=lowerCAmelCase__ ) ).T lowercase = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(lowerCAmelCase__ , axis=1 ) , a_min=lowerCAmelCase__ ) ).T return jnp.matmul(lowerCAmelCase__ , norm_emb_a.T ) class _A ( nn.Module ): snake_case__ : CLIPConfig snake_case__ : jnp.dtype = jnp.floataa def A__ ( self ): """simple docstring""" lowercase = FlaxCLIPVisionModule(self.config.vision_config ) lowercase = nn.Dense(self.config.projection_dim , use_bias=UpperCamelCase__ , dtype=self.dtype ) lowercase = self.param("""concept_embeds""" , jax.nn.initializers.ones , (17, self.config.projection_dim) ) lowercase = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) lowercase = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (17,) ) lowercase = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,) ) def __call__( self , __lowerCAmelCase ): """simple docstring""" lowercase = self.vision_model(UpperCamelCase__ )[1] lowercase = self.visual_projection(UpperCamelCase__ ) lowercase = jax_cosine_distance(UpperCamelCase__ , self.special_care_embeds ) lowercase = jax_cosine_distance(UpperCamelCase__ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs lowercase = 0.0 lowercase = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment lowercase = 
jnp.round(UpperCamelCase__ , 3 ) lowercase = jnp.any(special_scores > 0 , axis=1 , keepdims=UpperCamelCase__ ) # Use a lower threshold if an image has any special care concept lowercase = is_special_care * 0.0_1 lowercase = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment lowercase = jnp.round(UpperCamelCase__ , 3 ) lowercase = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class _A ( lowercase_ ): snake_case__ : Union[str, Any] = CLIPConfig snake_case__ : Tuple = '''clip_input''' snake_case__ : Any = FlaxStableDiffusionSafetyCheckerModule def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 0 , __lowerCAmelCase = jnp.floataa , __lowerCAmelCase = True , **__lowerCAmelCase , ): """simple docstring""" if input_shape is None: lowercase = (1, 224, 224, 3) lowercase = self.module_class(config=UpperCamelCase__ , dtype=UpperCamelCase__ , **UpperCamelCase__ ) super().__init__(UpperCamelCase__ , UpperCamelCase__ , input_shape=UpperCamelCase__ , seed=UpperCamelCase__ , dtype=UpperCamelCase__ , _do_init=_do_init ) def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ): """simple docstring""" lowercase = jax.random.normal(UpperCamelCase__ , UpperCamelCase__ ) lowercase , lowercase = jax.random.split(UpperCamelCase__ ) lowercase = {"""params""": params_rng, """dropout""": dropout_rng} lowercase = self.module.init(UpperCamelCase__ , UpperCamelCase__ )["""params"""] return random_params def __call__( self , __lowerCAmelCase , __lowerCAmelCase = None , ): """simple docstring""" lowercase = jnp.transpose(UpperCamelCase__ , (0, 2, 3, 1) ) return self.module.apply( {"""params""": params or self.params} , jnp.array(UpperCamelCase__ , dtype=jnp.floataa ) , rngs={} , )
359
import numpy as np
import torch
from torch.utils.data import Dataset

from utils import logger


class _lowerCAmelCase(Dataset):
    """Dataset of tokenized sequences for distillation training.

    Wraps an array of token-id sequences, cleans it (splits over-long
    sequences, drops too-short sequences and sequences dominated by unknown
    tokens), and provides a padding collate function (`batch_sequences`).
    """

    def __init__(self, params, data):
        # `params` carries: max_model_input_size, mlm, special_tok_ids, is_master.
        self.params = params
        self.token_ids = np.array(data)
        self.lengths = np.array([len(t) for t in data])

        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()

    def __getitem__(self, index):
        return (self.token_ids[index], self.lengths[index])

    def __len__(self):
        return len(self.lengths)

    def check(self):
        """Assert token_ids and lengths are consistent."""
        assert len(self.token_ids) == len(self.lengths)
        assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths)))

    def remove_long_sequences(self):
        """Split sequences longer than max_model_input_size into chunks.

        Each chunk is re-framed with the start/end special tokens
        (cls/sep for MLM, bos/eos otherwise).
        """
        max_len = self.params.max_model_input_size
        indices = self.lengths > max_len
        logger.info(f"Splitting {sum(indices)} too long sequences.")

        def divide_chunks(l, n):
            # Slice `l` into consecutive chunks of length `n` (last may be shorter).
            return [l[i : i + n] for i in range(0, len(l), n)]

        new_tok_ids = []
        new_lengths = []
        if self.params.mlm:
            cls_id, sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
        else:
            cls_id, sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]

        for seq_, len_ in zip(self.token_ids, self.lengths):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_)
                new_lengths.append(len_)
            else:
                sub_seqs = []
                # Reserve 2 slots per chunk for the re-added special tokens.
                for sub_s in divide_chunks(seq_, max_len - 2):
                    if sub_s[0] != cls_id:
                        sub_s = np.insert(sub_s, 0, cls_id)
                    if sub_s[-1] != sep_id:
                        sub_s = np.insert(sub_s, len(sub_s), sep_id)
                    assert len(sub_s) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(sub_s)

                new_tok_ids.extend(sub_seqs)
                new_lengths.extend([len(l) for l in sub_seqs])

        self.token_ids = np.array(new_tok_ids)
        self.lengths = np.array(new_lengths)

    def remove_empty_sequences(self):
        """Drop sequences of 11 tokens or fewer."""
        init_size = len(self)
        indices = self.lengths > 11
        self.token_ids = self.token_ids[indices]
        self.lengths = self.lengths[indices]
        new_size = len(self)
        logger.info(f"Remove {init_size - new_size} too short (<=11 tokens) sequences.")

    def remove_unknown_sequences(self):
        """Drop sequences where >=50% of tokens are the unknown token."""
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            unk_token_id = self.params.special_tok_ids["unk_token"]
            init_size = len(self)
            unk_occs = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids])
            indices = (unk_occs / self.lengths) < 0.5
            self.token_ids = self.token_ids[indices]
            self.lengths = self.lengths[indices]
            new_size = len(self)
            logger.info(f"Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).")

    def print_statistics(self):
        """Log dataset statistics (master process only)."""
        if not self.params.is_master:
            return
        logger.info(f"{len(self)} sequences")
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')

    def batch_sequences(self, batch):
        """Collate a list of (token_ids, length) pairs into padded tensors.

        Returns (token_ids LongTensor of shape (bs, max_seq_len), lengths
        LongTensor of shape (bs,)).
        """
        token_ids = [t[0] for t in batch]
        lengths = [t[1] for t in batch]
        assert len(token_ids) == len(lengths)

        # Max for paddings
        max_seq_len_ = max(lengths)

        # Pad token ids
        if self.params.mlm:
            pad_idx = self.params.special_tok_ids["pad_token"]
        else:
            pad_idx = self.params.special_tok_ids["unk_token"]
        tk_ = [list(t.astype(int)) + [pad_idx] * (max_seq_len_ - len(t)) for t in token_ids]
        assert len(tk_) == len(token_ids)
        assert all(len(t) == max_seq_len_ for t in tk_)

        tk_t = torch.tensor(tk_)  # (bs, max_seq_len_)
        lg_t = torch.tensor(lengths)  # (bs)
        return tk_t, lg_t
654
0
'''simple docstring''' from collections import defaultdict from math import ceil, sqrt def _UpperCamelCase ( __UpperCamelCase = 1_00_00_00 ,__UpperCamelCase = 10 ) -> Dict: lowerCamelCase_ = defaultdict(__UpperCamelCase ) for outer_width in range(3 ,(t_limit // 4) + 2 ): if outer_width * outer_width > t_limit: lowerCamelCase_ = max( ceil(sqrt(outer_width * outer_width - t_limit ) ) ,1 ) else: lowerCamelCase_ = 1 hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2 for hole_width in range(__UpperCamelCase ,outer_width - 1 ,2 ): count[outer_width * outer_width - hole_width * hole_width] += 1 return sum(1 for n in count.values() if 1 <= n <= 10 ) if __name__ == "__main__": print(f'''{solution() = }''')
42
"""Convert an original YOSO checkpoint into the HF Transformers format."""
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map one original YOSO parameter name to its Transformers equivalent.

    Order matters: the more specific substrings (norm1/norm2, mha.attn, ff1/ff2)
    are rewritten before their generic prefixes (norm, mha, ff).
    """
    if "model" in orig_key:
        orig_key = orig_key.replace("""model.""", """""")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("""norm1""", """attention.output.LayerNorm""")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("""norm2""", """output.LayerNorm""")
    if "norm" in orig_key:
        orig_key = orig_key.replace("""norm""", """LayerNorm""")
    if "transformer" in orig_key:
        # "transformer_<n>..." -> "encoder.layer.<n>..."
        layer_num = orig_key.split(""".""")[0].split("""_""")[-1]
        orig_key = orig_key.replace(f"""transformer_{layer_num}""", f"""encoder.layer.{layer_num}""")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("""mha.attn""", """attention.self""")
    if "mha" in orig_key:
        orig_key = orig_key.replace("""mha""", """attention""")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("""W_q""", """self.query""")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("""W_k""", """self.key""")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("""W_v""", """self.value""")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("""ff1""", """intermediate.dense""")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("""ff2""", """output.dense""")
    if "ff" in orig_key:
        orig_key = orig_key.replace("""ff""", """output.dense""")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("""mlm.mlm_class""", """cls.predictions.decoder""")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("""mlm""", """cls.predictions.transform""")
    if "cls" not in orig_key:
        orig_key = """yoso.""" + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys of `orig_state_dict` in place and add derived entries.

    Pooler and sentence-classification weights are dropped (this conversion
    targets the masked-LM head only).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    # NOTE(review): derived-entry target keys reconstructed from the upstream
    # conversion script — confirm against the YOSO model's state dict.
    orig_state_dict["""cls.predictions.bias"""] = orig_state_dict["""cls.predictions.decoder.bias"""]
    # Positions start at 2 (padding offset).
    orig_state_dict["""yoso.embeddings.position_ids"""] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    """Load the original checkpoint, convert it, and save an HF model.

    :param checkpoint_path: path to the original YOSO .bin/.pt checkpoint
    :param yoso_config_file: JSON config describing the model architecture
    :param pytorch_dump_path: output directory for `save_pretrained`
    """
    orig_state_dict = torch.load(checkpoint_path, map_location="""cpu""")["""model_state_dict"""]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    # load_state_dict's return value reports missing/unexpected keys.
    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The json file for YOSO model config.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
654
0
# NOTE(review): this chunk went through an identifier-mangling pass — distinct names
# were collapsed to placeholders (`lowerCamelCase`, `UpperCamelCase__`, `lowercase_`,
# `_a`), so assignments bind throwaway names while later statements read the
# originally-intended ones (`dset`, `examples`, `index`, ...), all test methods share
# the name `_a` (they shadow each other), and one lambda has duplicate parameters.
# Code is re-flowed onto conventional lines here but otherwise left token-identical;
# it needs the original identifiers restored before it can run.
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


a_ : List[Any] = pytest.mark.integration


@require_faiss
class lowerCamelCase__ ( lowercase_):
    """Integration tests for Dataset-attached FAISS / Elasticsearch indexes."""

    def _a (self ):
        # Builds a 30-row dummy dataset with filenames "my_name-train_0".."_29".
        lowerCamelCase = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(UpperCamelCase__ ) for x in np.arange(30 ).tolist()]} )
        return dset

    def _a (self ):
        # Add a FAISS index over mapped vectors and query for the nearest example.
        import faiss

        lowerCamelCase = self._create_dummy_dataset()
        lowerCamelCase = dset.map(
            lambda __a , __a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCamelCase__ , keep_in_memory=UpperCamelCase__ )
        lowerCamelCase = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
        lowerCamelCase , lowerCamelCase = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )
        dset.drop_index("vecs" )

    def _a (self ):
        # Same as above but the vectors come from an external numpy array.
        import faiss

        lowerCamelCase = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        lowerCamelCase , lowerCamelCase = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def _a (self ):
        # Round-trip a FAISS index through save/load on a temp file.
        import faiss

        lowerCamelCase = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=UpperCamelCase__ ) as tmp_file:
            dset.save_faiss_index("vecs" , tmp_file.name )
            dset.load_faiss_index("vecs2" , tmp_file.name )
        os.unlink(tmp_file.name )

        lowerCamelCase , lowerCamelCase = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def _a (self ):
        # Querying a dropped index must raise (presumably MissingIndex — the
        # expected exception was mangled to `UpperCamelCase__`; verify).
        lowerCamelCase = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
        dset.drop_index("vecs" )
        self.assertRaises(UpperCamelCase__ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )

    def _a (self ):
        # Elasticsearch-backed index with the client fully mocked out.
        from elasticsearch import Elasticsearch

        lowerCamelCase = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            lowerCamelCase = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30 )
            lowerCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            lowerCamelCase = Elasticsearch()
            dset.add_elasticsearch_index("filename" , es_client=UpperCamelCase__ )
            lowerCamelCase , lowerCamelCase = dset.get_nearest_examples("filename" , "my_name-train_29" )
            self.assertEqual(examples["filename"][0] , "my_name-train_29" )


@require_faiss
class lowerCamelCase__ ( lowercase_):
    """Unit tests for the standalone FaissIndex wrapper."""

    def _a (self ):
        # Basic add_vectors / search / search_batch behaviour.
        import faiss

        lowerCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )

        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )

        # single query
        lowerCamelCase = np.zeros(5 , dtype=np.floataa )
        lowerCamelCase = 1
        lowerCamelCase , lowerCamelCase = index.search(UpperCamelCase__ )
        self.assertRaises(UpperCamelCase__ , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )

        # batched queries
        lowerCamelCase = np.eye(5 , dtype=np.floataa )[::-1]
        lowerCamelCase , lowerCamelCase = index.search_batch(UpperCamelCase__ )
        self.assertRaises(UpperCamelCase__ , index.search_batch , queries[0] )
        lowerCamelCase = [scores[0] for scores in total_scores]
        lowerCamelCase = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(UpperCamelCase__ ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , UpperCamelCase__ )

    def _a (self ):
        # string_factory construction, and factory + custom_index must conflict.
        import faiss

        lowerCamelCase = FaissIndex(string_factory="Flat" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        lowerCamelCase = FaissIndex(string_factory="LSH" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(UpperCamelCase__ ):
            lowerCamelCase = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )

    def _a (self ):
        # Wrapping a pre-built faiss index.
        import faiss

        lowerCamelCase = faiss.IndexFlat(5 )
        lowerCamelCase = FaissIndex(custom_index=UpperCamelCase__ )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )

    def _a (self ):
        # Serialization round-trip through a temp file.
        import faiss

        lowerCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=UpperCamelCase__ ) as tmp_file:
            index.save(tmp_file.name )
            lowerCamelCase = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )

        lowerCamelCase = np.zeros(5 , dtype=np.floataa )
        lowerCamelCase = 1
        lowerCamelCase , lowerCamelCase = index.search(UpperCamelCase__ )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )


@require_faiss
def __lowercase( UpperCAmelCase__ ):
    """Save/load a FaissIndex through an fsspec mock filesystem (pytest fixture)."""
    import faiss

    lowerCamelCase = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )

    lowerCamelCase = "index.faiss"
    lowerCamelCase = F"""mock://{index_name}"""
    index.save(UpperCAmelCase__ , storage_options=mockfs.storage_options )
    lowerCamelCase = FaissIndex.load(UpperCAmelCase__ , storage_options=mockfs.storage_options )

    lowerCamelCase = np.zeros(5 , dtype=np.floataa )
    lowerCamelCase = 1
    lowerCamelCase , lowerCamelCase = index.search(UpperCAmelCase__ )
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class lowerCamelCase__ ( lowercase_):
    """Unit tests for the standalone ElasticSearchIndex wrapper (client mocked)."""

    def _a (self ):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            lowerCamelCase = Elasticsearch()
            lowerCamelCase = {"acknowledged": True}
            lowerCamelCase = ElasticSearchIndex(es_client=UpperCamelCase__ )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(["foo", "bar", "foobar"] )

            # single query
            lowerCamelCase = "foo"
            lowerCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            lowerCamelCase , lowerCamelCase = index.search(UpperCamelCase__ )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )

            # single query with timeout
            lowerCamelCase = "foo"
            lowerCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            lowerCamelCase , lowerCamelCase = index.search(UpperCamelCase__ , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )

            # batched queries
            lowerCamelCase = ["foo", "bar", "foobar"]
            lowerCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            lowerCamelCase , lowerCamelCase = index.search_batch(UpperCamelCase__ )
            lowerCamelCase = [scores[0] for scores in total_scores]
            lowerCamelCase = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(UpperCamelCase__ ) , 0 )
            self.assertListEqual([1, 1, 1] , UpperCamelCase__ )

            # batched queries with timeout
            lowerCamelCase = ["foo", "bar", "foobar"]
            lowerCamelCase = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            lowerCamelCase , lowerCamelCase = index.search_batch(UpperCamelCase__ , request_timeout=30 )
            lowerCamelCase = [scores[0] for scores in total_scores]
            lowerCamelCase = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(UpperCamelCase__ ) , 0 )
            self.assertListEqual([1, 1, 1] , UpperCamelCase__ )
623
# NOTE(review): identifier-mangled copy of an fsspec "compression filesystem"
# module. All classes were renamed to `_lowerCAmelCase`, all class attributes to
# `_lowercase`, methods to `__magic_name__`, and parameters to `UpperCamelCase__`
# (duplicated within one signature — a SyntaxError as written). Bodies still read
# the intended names (`target_options`, `mode`, `file_`, `_enter`, ...). Re-flowed
# onto conventional lines; tokens otherwise unchanged.
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class _lowerCAmelCase ( lowercase_ ):
    """Read-only filesystem exposing a single compressed file as one uncompressed entry."""

    _lowercase : Optional[int] = ''''''
    _lowercase : str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    _lowercase : str = None  # compression type in fsspec. ex: "gzip"
    _lowercase : str = None  # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz

    def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]):
        """Open the target file lazily with fsspec; decompression happens on read."""
        super().__init__(self , **UpperCamelCase__)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        snake_case__ = fsspec.open(
            UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={
                """requote_redirect_url""": False,  # see https://github.com/huggingface/datasets/pull/5459
                """trust_env""": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("""client_kwargs""" , {}),  # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        # "file.txt.gz::http://..." -> "file.txt.gz"; strip the extension for the exposed name.
        snake_case__ = os.path.basename(self.file.path.split("""::""")[0])
        snake_case__ = (
            self.compressed_name[: self.compressed_name.rindex(""".""")] if """.""" in self.compressed_name else self.compressed_name
        )
        snake_case__ = None

    @classmethod
    def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]):
        # Compressed files unpack to a single file at the filesystem root.
        return super()._strip_protocol(UpperCamelCase__).lstrip("""/""")

    def __magic_name__ ( self : Dict):
        # Populate the dir cache with the single uncompressed entry.
        if self.dir_cache is None:
            snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name}
            snake_case__ = {f["""name"""]: f}

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str):
        # Return the full decompressed content.
        return self.file.open().read()

    def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ):
        # Only binary read access is supported.
        snake_case__ = self._strip_protocol(UpperCamelCase__)
        if mode != "rb":
            raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''')
        return self.file.open()


class _lowerCAmelCase ( lowercase_ ):
    """bz2-compressed single-file filesystem."""

    _lowercase : Dict = '''bz2'''
    _lowercase : Dict = '''bz2'''
    _lowercase : Optional[int] = '''.bz2'''


class _lowerCAmelCase ( lowercase_ ):
    """gzip-compressed single-file filesystem."""

    _lowercase : Dict = '''gzip'''
    _lowercase : List[str] = '''gzip'''
    _lowercase : Any = '''.gz'''


class _lowerCAmelCase ( lowercase_ ):
    """lz4-compressed single-file filesystem."""

    _lowercase : str = '''lz4'''
    _lowercase : List[Any] = '''lz4'''
    _lowercase : Dict = '''.lz4'''


class _lowerCAmelCase ( lowercase_ ):
    """xz-compressed single-file filesystem."""

    _lowercase : Optional[int] = '''xz'''
    _lowercase : Union[str, Any] = '''xz'''
    _lowercase : Optional[int] = '''.xz'''


class _lowerCAmelCase ( lowercase_ ):
    """zstd-compressed single-file filesystem, with a workaround for zstandard's reader."""

    _lowercase : Optional[int] = '''zstd'''
    _lowercase : Tuple = '''zstd'''
    _lowercase : Union[str, Any] = '''.zst'''

    def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ):
        super().__init__(
            fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        snake_case__ = self.file.__enter__

        class _lowerCAmelCase :
            """Proxy that delegates to the wrapped file but owns a writable close/exit."""

            def __init__( self : Tuple , UpperCamelCase__ : str):
                snake_case__ = file_

            def __enter__( self : List[str]):
                self._file.__enter__()
                return self

            def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]):
                self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__)

            def __iter__( self : Any):
                return iter(self._file)

            def __magic_name__ ( self : List[str]):
                return next(self._file)

            def __getattr__( self : Any , UpperCamelCase__ : int):
                # Everything else is forwarded to the underlying reader.
                return getattr(self._file , UpperCamelCase__)

        def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int):
            return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__))

        snake_case__ = fixed_enter
654
0
'''Identifier-mangled copy of a VQ-Diffusion text-to-image pipeline.

NOTE(review): distinct names were collapsed to placeholders (`lowercase`,
`UpperCamelCase__`, `__SCREAMING_SNAKE_CASE` for both classes, `lowercase_`
for base classes), so assignments bind throwaway names while later statements
read the intended ones (`prompt_embeds`, `latents`, `sample`, ...). Re-flowed
onto conventional lines; tokens otherwise unchanged. The original identifiers
must be restored before this can run.
'''
from typing import Callable, List, Optional, Tuple, Union

import torch
from transformers import CLIPTextModel, CLIPTokenizer

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


UpperCamelCase_ = logging.get_logger(__name__)  # pylint: disable=invalid-name


# Appears to correspond to LearnedClassifierFreeSamplingEmbeddings (the second
# class's __init__ annotation names that type) — learnable or absent embeddings
# used in place of empty-prompt embeddings for classifier-free guidance.
class __SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ):
    @register_to_config
    def __init__( self : Optional[int] , UpperCAmelCase__ : bool , UpperCAmelCase__ : Optional[int] = None , UpperCAmelCase__ : Optional[int] = None ):
        super().__init__()
        lowercase : str =learnable

        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            lowercase : List[str] =torch.zeros(UpperCamelCase__ , UpperCamelCase__ )
        else:
            lowercase : List[str] =None

        lowercase : Optional[int] =torch.nn.Parameter(UpperCamelCase__ )


# The VQ-Diffusion pipeline itself.
class __SCREAMING_SNAKE_CASE ( lowercase_ ):
    # Mangled class-level attribute placeholders (originally typed module fields:
    # vqvae / text_encoder / tokenizer / transformer / scheduler / embeddings).
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42
    lowerCamelCase_ = 42

    def __init__( self : Any , UpperCAmelCase__ : VQModel , UpperCAmelCase__ : CLIPTextModel , UpperCAmelCase__ : CLIPTokenizer , UpperCAmelCase__ : TransformeraDModel , UpperCAmelCase__ : VQDiffusionScheduler , UpperCAmelCase__ : LearnedClassifierFreeSamplingEmbeddings , ):
        super().__init__()

        self.register_modules(
            vqvae=UpperCamelCase__ , transformer=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , scheduler=UpperCamelCase__ , learned_classifier_free_sampling_embeddings=UpperCamelCase__ , )

    def lowerCamelCase_ ( self : int , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ):
        """Encode the prompt with CLIP and build (optionally guided) embeddings."""
        lowercase : Optional[Any] =len(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else 1

        # get prompt text embeddings
        lowercase : List[Any] =self.tokenizer(
            UpperCamelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
        lowercase : str =text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            lowercase : Union[str, Any] =self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
            lowercase : Optional[Any] =text_input_ids[:, : self.tokenizer.model_max_length]
        lowercase : Any =self.text_encoder(text_input_ids.to(self.device ) )[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        lowercase : Any =prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase__ )

        # duplicate text embeddings for each generation per prompt
        lowercase : Optional[Any] =prompt_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                lowercase : Optional[Any] =self.learned_classifier_free_sampling_embeddings.embeddings
                lowercase : Dict =negative_prompt_embeds.unsqueeze(0 ).repeat(UpperCamelCase__ , 1 , 1 )
            else:
                # Fall back to encoding empty prompts as the unconditional branch.
                lowercase : str =[''''''] * batch_size

                lowercase : Any =text_input_ids.shape[-1]
                lowercase : int =self.tokenizer(
                    UpperCamelCase__ , padding='''max_length''' , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ , return_tensors='''pt''' , )
                lowercase : Any =self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                lowercase : Optional[int] =negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=UpperCamelCase__ )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            lowercase : Union[str, Any] =negative_prompt_embeds.shape[1]
            lowercase : int =negative_prompt_embeds.repeat(1 , UpperCamelCase__ , 1 )
            lowercase : int =negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase__ , -1 )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase : int =torch.cat([negative_prompt_embeds, prompt_embeds] )

        return prompt_embeds

    @torch.no_grad()
    def __call__( self : str , UpperCAmelCase__ : Union[str, List[str]] , UpperCAmelCase__ : int = 100 , UpperCAmelCase__ : float = 5.0 , UpperCAmelCase__ : float = 1.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCAmelCase__ : Optional[torch.FloatTensor] = None , UpperCAmelCase__ : Optional[str] = "pil" , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase__ : int = 1 , ):
        """Run the full denoising loop from fully-masked latents to decoded images."""
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            lowercase : str =1
        elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            lowercase : Any =len(UpperCamelCase__ )
        else:
            raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__ )}''' )

        lowercase : str =batch_size * num_images_per_prompt

        lowercase : Tuple =guidance_scale > 1.0

        lowercase : Union[str, Any] =self._encode_prompt(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(UpperCamelCase__ )}.''' )

        # get the initial completely masked latents unless the user supplied it
        lowercase : Optional[int] =(batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # The "masked" token is the last vector-embedding index.
            lowercase : Union[str, Any] =self.transformer.num_vector_embeds - 1
            lowercase : Union[str, Any] =torch.full(UpperCamelCase__ , UpperCamelCase__ ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
                    F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
            lowercase : Optional[int] =latents.to(self.device )

        # set timesteps
        self.scheduler.set_timesteps(UpperCamelCase__ , device=self.device )

        lowercase : Dict =self.scheduler.timesteps.to(self.device )

        lowercase : Any =latents

        for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
            # expand the sample if we are doing classifier free guidance
            lowercase : int =torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            lowercase : Any =self.transformer(UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , timestep=UpperCamelCase__ ).sample

            if do_classifier_free_guidance:
                lowercase , lowercase : Dict =model_output.chunk(2 )
                lowercase : Tuple =model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # Re-normalize the guided log-probabilities.
                model_output -= torch.logsumexp(UpperCamelCase__ , dim=1 , keepdim=UpperCamelCase__ )

            lowercase : List[str] =self.truncate(UpperCamelCase__ , UpperCamelCase__ )

            # remove `log(0)`'s (`-inf`s)
            lowercase : Tuple =model_output.clamp(-70 )

            # compute the previous noisy sample x_t -> x_t-1
            lowercase : str =self.scheduler.step(UpperCamelCase__ , timestep=UpperCamelCase__ , sample=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )

        # Decode the final latent codes through the VQ-VAE codebook.
        lowercase : List[Any] =self.vqvae.config.vq_embed_dim
        lowercase : Any =(batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        lowercase : Dict =self.vqvae.quantize.get_codebook_entry(UpperCamelCase__ , shape=UpperCamelCase__ )
        lowercase : str =self.vqvae.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ ).sample

        lowercase : List[str] =(image / 2 + 0.5).clamp(0 , 1 )
        lowercase : Dict =image.cpu().permute(0 , 2 , 3 , 1 ).numpy()

        if output_type == "pil":
            lowercase : Dict =self.numpy_to_pil(UpperCamelCase__ )

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=UpperCamelCase__ )

    def lowerCamelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : torch.FloatTensor , UpperCAmelCase__ : float ):
        """Zero out (set to log(0)) all but the most probable tokens up to `truncation_rate`."""
        lowercase , lowercase : Optional[int] =torch.sort(UpperCamelCase__ , 1 , descending=UpperCamelCase__ )
        lowercase : Any =torch.exp(UpperCamelCase__ )
        lowercase : Dict =sorted_p_x_0.cumsum(dim=1 ) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        lowercase : List[Any] =torch.full_like(keep_mask[:, 0:1, :] , UpperCamelCase__ )
        lowercase : Dict =torch.cat((all_true, keep_mask) , dim=1 )
        lowercase : Optional[int] =keep_mask[:, :-1, :]
        # Undo the sort so the mask lines up with the original token order.
        lowercase : int =keep_mask.gather(1 , indices.argsort(1 ) )

        lowercase : Any =log_p_x_0.clone()
        lowercase : Optional[Any] =-torch.inf  # -inf = log(0)

        return rv
92
def _UpperCAmelCase ( a : int ): if number < 0: raise ValueError("""number must not be negative""" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
654
0
'''Benchmark helpers: time a callable and materialise dummy Arrow datasets.'''
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator: the wrapped call returns its own wall-clock duration in seconds.

    Note the wrapper deliberately discards `func`'s return value — the benchmark
    only cares about timing. Fixes the mangled original where all three module
    functions shared the name `_a` and shadowed each other.
    """

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    # Keep the decorated function's name for benchmark reporting.
    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples: int = 100, seq_shapes=None):
    """Build `num_examples` random examples matching a `datasets` features dict.

    Args:
        features: mapping of column name -> feature type (_ArrayXD, Value, Sequence).
        num_examples: number of (index, example) pairs to produce.
        seq_shapes: per-column shapes for Sequence features (required for each
            Sequence column present in `features`).

    Returns:
        list of ``(i, example_dict)`` tuples.
    """
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for k, v in features.items():
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                # Unwrap nested Sequences down to the element feature.
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write random examples to an Arrow file and load them back as a Dataset.

    Raises:
        ValueError: if the writer reports a different number of examples than
            requested (indicates a write failure).
    """
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
404
class _lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = size snake_case__ = [0] * size snake_case__ = [0] * size @staticmethod def __magic_name__ ( UpperCamelCase__ : int): '''simple docstring''' return index | (index + 1) @staticmethod def __magic_name__ ( UpperCamelCase__ : int): '''simple docstring''' return (index & (index + 1)) - 1 def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = value while index < self.size: snake_case__ = self.get_prev(UpperCamelCase__) + 1 if current_left_border == index: snake_case__ = value else: snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) snake_case__ = self.get_next(UpperCamelCase__) def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int): '''simple docstring''' right -= 1 # Because of right is exclusive snake_case__ = 0 while left <= right: snake_case__ = self.get_prev(UpperCamelCase__) if left <= current_left: snake_case__ = max(UpperCamelCase__ , self.tree[right]) snake_case__ = current_left else: snake_case__ = max(UpperCamelCase__ , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
654
0
def lowerCAmelCase__ ( lowerCamelCase_ : int = 50): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = [1] * (length + 1) for row_length in range(length + 1): for tile_length in range(2 ,5): for tile_start in range(row_length - tile_length + 1): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f"""{solution() = }""")
647
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _lowerCAmelCase : """simple docstring""" _lowercase : List[str] = PegasusConfig _lowercase : Union[str, Any] = {} _lowercase : Tuple = '''gelu''' def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=3_7 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=4_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , ): '''simple docstring''' snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = eos_token_id snake_case__ = pad_token_id snake_case__ = bos_token_id def __magic_name__ ( self : Optional[Any]): '''simple docstring''' snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) snake_case__ = 
tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) snake_case__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) return config, inputs_dict def __magic_name__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder() snake_case__ = inputs_dict["""input_ids"""] snake_case__ = input_ids[:1, :] snake_case__ = inputs_dict["""attention_mask"""][:1, :] snake_case__ = inputs_dict["""head_mask"""] snake_case__ = 1 # first forward pass snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__) snake_case__ , snake_case__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size) snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1) snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1) snake_case__ = model(UpperCamelCase__ , 
attention_mask=UpperCamelCase__)[0] snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1])) snake_case__ = output_from_no_past[:, -3:, random_slice_idx] snake_case__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3) def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : List[str] , a : str=None , a : int=None , a : int=None , a : int=None , a : Optional[int]=None , ): if attention_mask is None: snake_case__ = tf.cast(tf.math.not_equal(a , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: snake_case__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else () _lowercase : List[Any] = ( { '''conversational''': 
TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : Optional[int] = True _lowercase : Dict = False _lowercase : Any = False def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = TFPegasusModelTester(self) snake_case__ = ConfigTester(self , config_class=UpperCamelCase__) def __magic_name__ ( self : List[Any]): '''simple docstring''' self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__) @require_sentencepiece @require_tokenizers @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _lowercase : List[str] = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] _lowercase : str = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers _lowercase : int = '''google/pegasus-xsum''' @cached_property def __magic_name__ ( self : Dict): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name) @cached_property def __magic_name__ ( self : int): '''simple docstring''' snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]): '''simple docstring''' snake_case__ = self.translate_src_text(**UpperCamelCase__) assert self.expected_text == generated_words def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]): '''simple docstring''' snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""") snake_case__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , ) snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__) return generated_words @slow def __magic_name__ ( self : List[str]): '''simple docstring''' self._assert_generated_batch_equal_expected()
654
0
"""Near-duplicate detection/removal for a code dataset using MinHash + LSH.

Pipeline: compute a MinHash per file -> cluster near-duplicates with
MinHashLSH -> keep one "extreme" representative per Jaccard-similar group
-> filter the dataset down to the survivors.
"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type

from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm


NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute a MinHash of the token list; None for very short files
    (fewer than MIN_NUM_TOKENS tokens), which are too noisy to compare."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Split code on non-alphanumeric characters into a set of tokens."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    """Incrementally clusters near-duplicate files via MinHashLSH.

    Each cluster is keyed by the first element inserted (its "base").
    """

    def __init__(
        self,
        *,
        duplication_jaccard_threshold: float = 0.85,
    ):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert (code_key, min_hash); attach the key to an existing
        cluster when the LSH index reports close duplicates."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                # No existing cluster among the hits: start one at the
                # first close duplicate.
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        """Export clusters as lists of {base_index, repo_name, path} dicts."""
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        """Dump the clusters to `filepath` as JSON."""
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    """Worker: MinHash one (index, row) pair; returns (key, min_hash) or None."""
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    """Yield (key, min_hash) pairs, hashing rows in a process pool."""
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Hash every row of the dataset and cluster near-duplicates."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Exact token-set Jaccard similarity of two code strings."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Worker: greedily pick cluster representatives ("extremes").

    An element joins the extremes only if it is not Jaccard-similar to any
    already-chosen extreme; otherwise it bumps that extreme's `copies`
    count.  Reads the dataset through the module-global `_shared_dataset`
    so each pool worker avoids pickling the whole dataset per task.
    """
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Compute the extremes of every cluster in parallel."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(
                f,
                cluster_list,
            ),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Remove near-duplicates from `dataset`, keeping one representative
    per similar group; returns the filtered dataset and the clusters
    (annotated with `is_extreme` and `copies`)."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
205
"""Tokenizer for the Jukebox music-generation model: maps an (artist,
genres, lyrics) triple to id sequences, one per model prior (v3/v2/v2)."""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import regex

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    """
    Encodes artist, genre and lyric inputs for Jukebox.

    Artists and genres map through JSON vocabularies (unknowns -> id 0);
    lyrics are tokenized character-by-character against a fixed character
    vocabulary that differs between the v2 and v3 priors.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],  # one version tag per prior; treated read-only
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        """Total size across the three sub-vocabularies."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # NOTE: the original passed three positional mappings to dict(),
        # which raises TypeError; expose the sub-vocabularies explicitly.
        return {
            "artists_encoder": self.artists_encoder,
            "genres_encoder": self.genres_encoder,
            "lyrics_encoder": self.lyrics_encoder,
        }

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Map artists/genres/lyrics tokens to ids; genre lists are padded
        to `n_genres` with -1, lyrics are encoded for the first prior only."""
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Character-level tokenization of the lyrics string."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Normalize then tokenize the (artist, genre, lyrics) triple."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(
        self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False
    ):
        """Lower-case/normalize names per prior version and strip lyrics of
        out-of-vocabulary characters. For a v2 first prior, also rebuilds
        the fixed character vocabulary."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strip accents (Unicode combining marks) from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalize an artist/genre name to lowercase [a-z0-9.] with '_'
        for everything else, collapsing and trimming underscores."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics: List[str]) -> str:
        return " ".join(lyrics)

    def convert_to_tensors(
        self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False
    ):
        """Convert `inputs` to the requested framework's tensor type
        (defaults to numpy when `tensor_type` is not a known framework)."""
        # Convert to TensorType
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )

        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt") -> BatchEncoding:
        """Encode one (artist, genres, lyrics) triple into per-prior id
        tensors plus the lyric attention mask."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str, ...]:
        """Write the three vocabulary JSON files into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Inverse of `_convert_token_to_id` (unknown ids decode to None)."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
654
0
'''simple docstring''' import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) lowerCAmelCase__ = logging.getLogger(__name__) def _A ( A__ , A__ ): """simple docstring""" __lowercase = np.argmax(A__ , axis=1 ) return np.sum(outputs == labels ) def _A ( A__ ): """simple docstring""" with open(A__ , encoding='''utf_8''' ) as f: __lowercase = csv.reader(A__ ) __lowercase = [] next(A__ ) # skip the first line for line in tqdm(A__ ): output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def _A ( A__ , A__ , A__ , A__ , A__ , A__ ): """simple docstring""" __lowercase = [] for dataset in encoded_datasets: __lowercase = len(A__ ) __lowercase = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) __lowercase = np.zeros((n_batch, 2) , dtype=np.intaa ) __lowercase = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa ) __lowercase = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(A__ ): __lowercase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowercase = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] __lowercase = with_conta __lowercase = with_conta __lowercase = len(A__ ) - 1 __lowercase = len(A__ ) - 1 __lowercase = with_conta __lowercase = with_conta __lowercase = mc_label __lowercase = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(A__ ) for t in all_inputs ) ) return tensor_datasets def _A ( ): 
"""simple docstring""" __lowercase = argparse.ArgumentParser() parser.add_argument('''--model_name''' , type=A__ , default='''openai-gpt''' , help='''pretrained model name''' ) parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' ) parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' ) parser.add_argument( '''--output_dir''' , default=A__ , type=A__ , required=A__ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) parser.add_argument('''--train_dataset''' , type=A__ , default='''''' ) parser.add_argument('''--eval_dataset''' , type=A__ , default='''''' ) parser.add_argument('''--seed''' , type=A__ , default=42 ) parser.add_argument('''--num_train_epochs''' , type=A__ , default=3 ) parser.add_argument('''--train_batch_size''' , type=A__ , default=8 ) parser.add_argument('''--eval_batch_size''' , type=A__ , default=16 ) parser.add_argument('''--adam_epsilon''' , default=1e-8 , type=A__ , help='''Epsilon for Adam optimizer.''' ) parser.add_argument('''--max_grad_norm''' , type=A__ , default=1 ) parser.add_argument( '''--max_steps''' , default=-1 , type=A__ , help=( '''If > 0: set total number of training steps to perform. 
Override num_train_epochs.''' ) , ) parser.add_argument( '''--gradient_accumulation_steps''' , type=A__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , ) parser.add_argument('''--learning_rate''' , type=A__ , default=6.25e-5 ) parser.add_argument('''--warmup_steps''' , default=0 , type=A__ , help='''Linear warmup over warmup_steps.''' ) parser.add_argument('''--lr_schedule''' , type=A__ , default='''warmup_linear''' ) parser.add_argument('''--weight_decay''' , type=A__ , default=0.0_1 ) parser.add_argument('''--lm_coef''' , type=A__ , default=0.9 ) parser.add_argument('''--n_valid''' , type=A__ , default=374 ) parser.add_argument('''--server_ip''' , type=A__ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=A__ , default='''''' , help='''Can be used for distant debugging.''' ) __lowercase = parser.parse_args() print(A__ ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=A__ ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) __lowercase = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) __lowercase = torch.cuda.device_count() logger.info('''device: {}, n_gpu {}'''.format(A__ , A__ ) ) if not args.do_train and not args.do_eval: raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset __lowercase = ['''_start_''', '''_delimiter_''', 
'''_classify_'''] __lowercase = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(A__ ) __lowercase = tokenizer.convert_tokens_to_ids(A__ ) __lowercase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(A__ ) ) model.to(A__ ) # Load and encode the datasets def tokenize_and_encode(A__ ): if isinstance(A__ , A__ ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(A__ ) ) elif isinstance(A__ , A__ ): return obj return [tokenize_and_encode(A__ ) for o in obj] logger.info('''Encoding dataset...''' ) __lowercase = load_rocstories_dataset(args.train_dataset ) __lowercase = load_rocstories_dataset(args.eval_dataset ) __lowercase = (train_dataset, eval_dataset) __lowercase = tokenize_and_encode(A__ ) # Compute the max input length for the Transformer __lowercase = model.config.n_positions // 2 - 2 __lowercase = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) __lowercase = min(A__ , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders __lowercase = pre_process_datasets(A__ , A__ , A__ , *A__ ) __lowercase , __lowercase = tensor_datasets[0], tensor_datasets[1] __lowercase = TensorDataset(*A__ ) __lowercase = RandomSampler(A__ ) __lowercase = DataLoader(A__ , sampler=A__ , batch_size=args.train_batch_size ) __lowercase = TensorDataset(*A__ ) __lowercase = SequentialSampler(A__ ) __lowercase = DataLoader(A__ , sampler=A__ , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: __lowercase = args.max_steps __lowercase = args.max_steps // (len(A__ ) // args.gradient_accumulation_steps) + 1 else: __lowercase = len(A__ ) // args.gradient_accumulation_steps * args.num_train_epochs __lowercase = list(model.named_parameters() ) __lowercase = ['''bias''', '''LayerNorm.bias''', 
'''LayerNorm.weight'''] __lowercase = [ { '''params''': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], '''weight_decay''': args.weight_decay, }, {'''params''': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], '''weight_decay''': 0.0}, ] __lowercase = AdamW(A__ , lr=args.learning_rate , eps=args.adam_epsilon ) __lowercase = get_linear_schedule_with_warmup( A__ , num_warmup_steps=args.warmup_steps , num_training_steps=A__ ) if args.do_train: __lowercase , __lowercase , __lowercase = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ): __lowercase = 0 __lowercase = 0 __lowercase = tqdm(A__ , desc='''Training''' ) for step, batch in enumerate(A__ ): __lowercase = tuple(t.to(A__ ) for t in batch ) __lowercase , __lowercase , __lowercase , __lowercase = batch __lowercase = model(A__ , mc_token_ids=A__ , lm_labels=A__ , mc_labels=A__ ) __lowercase = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() __lowercase = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 __lowercase = '''Training loss: {:.2e} lr: {:.2e}'''.format(A__ , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer __lowercase = model.module if hasattr(A__ , '''module''' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` __lowercase = os.path.join(args.output_dir , A__ ) __lowercase = os.path.join(args.output_dir , A__ ) torch.save(model_to_save.state_dict() , A__ ) model_to_save.config.to_json_file(A__ ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned __lowercase = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) __lowercase = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) 
model.to(A__ ) if args.do_eval: model.eval() __lowercase , __lowercase = 0, 0 __lowercase , __lowercase = 0, 0 for batch in tqdm(A__ , desc='''Evaluating''' ): __lowercase = tuple(t.to(A__ ) for t in batch ) __lowercase , __lowercase , __lowercase , __lowercase = batch with torch.no_grad(): __lowercase , __lowercase , __lowercase , __lowercase = model( A__ , mc_token_ids=A__ , lm_labels=A__ , mc_labels=A__ ) __lowercase = mc_logits.detach().cpu().numpy() __lowercase = mc_labels.to('''cpu''' ).numpy() __lowercase = accuracy(A__ , A__ ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 __lowercase = eval_loss / nb_eval_steps __lowercase = eval_accuracy / nb_eval_examples __lowercase = tr_loss / nb_tr_steps if args.do_train else None __lowercase = {'''eval_loss''': eval_loss, '''eval_accuracy''': eval_accuracy, '''train_loss''': train_loss} __lowercase = os.path.join(args.output_dir , '''eval_results.txt''' ) with open(A__ , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key in sorted(result.keys() ): logger.info(''' %s = %s''' , A__ , str(result[key] ) ) writer.write('''%s = %s\n''' % (key, str(result[key] )) ) if __name__ == "__main__": main()
41
import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class _lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=sys.maxsize): '''simple docstring''' snake_case__ = """bilinear""" snake_case__ = max_size snake_case__ = short_edge_length def __call__( self : List[str] , UpperCamelCase__ : Tuple): '''simple docstring''' snake_case__ = [] for img in imgs: snake_case__ , snake_case__ = img.shape[:2] # later: provide list and randomly choose index for resize snake_case__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1) if size == 0: return img snake_case__ = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__) if h < w: snake_case__ , snake_case__ = size, scale * w else: snake_case__ , snake_case__ = scale * h, size if max(UpperCamelCase__ , UpperCamelCase__) > self.max_size: snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__) snake_case__ = newh * scale snake_case__ = neww * scale snake_case__ = int(neww + 0.5) snake_case__ = int(newh + 0.5) if img.dtype == np.uinta: snake_case__ = Image.fromarray(UpperCamelCase__) snake_case__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR) snake_case__ = np.asarray(UpperCamelCase__) else: snake_case__ = img.permute(2 , 0 , 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw snake_case__ = nn.functional.interpolate( UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__).squeeze(0) img_augs.append(UpperCamelCase__) return img_augs class _lowerCAmelCase : """simple docstring""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]): '''simple docstring''' snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST) snake_case__ = cfg.INPUT.FORMAT snake_case__ = 
cfg.SIZE_DIVISIBILITY snake_case__ = cfg.PAD_VALUE snake_case__ = cfg.INPUT.MAX_SIZE_TEST snake_case__ = cfg.MODEL.DEVICE snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1) snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict): '''simple docstring''' snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images])) snake_case__ = [im.shape[-2:] for im in images] snake_case__ = [ nn.functional.pad( UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase__ , UpperCamelCase__) ] return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__) def __call__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=False): '''simple docstring''' with torch.no_grad(): if not isinstance(UpperCamelCase__ , UpperCamelCase__): snake_case__ = [images] if single_image: assert len(UpperCamelCase__) == 1 for i in range(len(UpperCamelCase__)): if isinstance(images[i] , torch.Tensor): images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__).to(self.device).float()) elif not isinstance(images[i] , torch.Tensor): images.insert( UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__) , input_format=self.input_format)) .to(self.device) .float() , ) # resize smallest edge snake_case__ = torch.tensor([im.shape[:2] for im in images]) snake_case__ = self.aug(UpperCamelCase__) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic snake_case__ = [self.normalizer(UpperCamelCase__) for x in images] # now pad them to do the following operations snake_case__ , 
snake_case__ = self.pad(UpperCamelCase__) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad snake_case__ = torch.true_divide(UpperCamelCase__ , UpperCamelCase__) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _UpperCAmelCase ( a : Optional[Any] , a : Any ): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _UpperCAmelCase ( a : Any , a : Tuple[int, int] ): assert torch.isfinite(a ).all(), "Box tensor contains infinite or NaN!" snake_case__ , snake_case__ = box_size tensor[:, 0].clamp_(min=0 , max=a ) tensor[:, 1].clamp_(min=0 , max=a ) tensor[:, 2].clamp_(min=0 , max=a ) tensor[:, 3].clamp_(min=0 , max=a )
654
0
"""simple docstring""" def __magic_name__ ( __snake_case : list[int] ) -> int: lowercase : Optional[Any] = [] if len(__snake_case ) == 1: return [nums.copy()] for _ in range(len(__snake_case ) ): lowercase : List[str] = nums.pop(0 ) lowercase : List[str] = permute(__snake_case ) for perm in permutations: perm.append(__snake_case ) result.extend(__snake_case ) nums.append(__snake_case ) return result def __magic_name__ ( __snake_case : Union[str, Any] ) -> Optional[int]: def backtrack(__snake_case : List[Any] ): if start == len(__snake_case ) - 1: output.append(nums[:] ) else: for i in range(__snake_case , len(__snake_case ) ): lowercase , lowercase : int = nums[i], nums[start] backtrack(start + 1 ) lowercase , lowercase : Optional[int] = nums[i], nums[start] # backtrack lowercase : List[str] = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function _A : List[str] = permutea([1, 2, 3]) print(res) doctest.testmod()
361
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a__ = logging.get_logger(__name__) a__ = { """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""", # See all WavLM models at https://huggingface.co/models?filter=wavlm } class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : Dict = '''wavlm''' def __init__( self : Tuple , UpperCamelCase__ : str=3_2 , UpperCamelCase__ : Any=7_6_8 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : Any="group" , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[Any]=1_2_8 , UpperCamelCase__ : Optional[int]=1_6 , UpperCamelCase__ : Optional[Any]=3_2_0 , UpperCamelCase__ : Any=8_0_0 , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Optional[Any]=0.05 , UpperCamelCase__ : Optional[Any]=1_0 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=1_0 , UpperCamelCase__ : Optional[int]=3_2_0 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Tuple=1_0_0 , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Optional[int]=2_5_6 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : Tuple="mean" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Tuple=False , UpperCamelCase__ 
: Union[str, Any]=2_5_6 , UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase__ : Any=(1, 2, 3, 1, 1) , UpperCamelCase__ : Dict=5_1_2 , UpperCamelCase__ : str=8_0 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : str=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str] , ): '''simple docstring''' super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__) snake_case__ = hidden_size snake_case__ = feat_extract_norm snake_case__ = feat_extract_activation snake_case__ = list(UpperCamelCase__) snake_case__ = list(UpperCamelCase__) snake_case__ = list(UpperCamelCase__) snake_case__ = conv_bias snake_case__ = num_buckets snake_case__ = max_bucket_distance snake_case__ = num_conv_pos_embeddings snake_case__ = num_conv_pos_embedding_groups snake_case__ = len(self.conv_dim) snake_case__ = num_hidden_layers snake_case__ = intermediate_size snake_case__ = hidden_act snake_case__ = num_attention_heads snake_case__ = hidden_dropout snake_case__ = attention_dropout snake_case__ = activation_dropout snake_case__ = feat_proj_dropout snake_case__ = final_dropout snake_case__ = layerdrop snake_case__ = layer_norm_eps snake_case__ = initializer_range snake_case__ = num_ctc_classes snake_case__ = vocab_size snake_case__ = do_stable_layer_norm snake_case__ = use_weighted_layer_sum snake_case__ = classifier_proj_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect. 
It is required that `len(config.conv_dim)` ==""" """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =""" F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,''' F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''') # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 snake_case__ = apply_spec_augment snake_case__ = mask_time_prob snake_case__ = mask_time_length snake_case__ = mask_time_min_masks snake_case__ = mask_feature_prob snake_case__ = mask_feature_length # parameters for pretraining with codevector quantized representations snake_case__ = num_codevectors_per_group snake_case__ = num_codevector_groups snake_case__ = contrastive_logits_temperature snake_case__ = num_negatives snake_case__ = codevector_dim snake_case__ = proj_codevector_dim snake_case__ = diversity_loss_weight # ctc loss snake_case__ = ctc_loss_reduction snake_case__ = ctc_zero_infinity # adapter snake_case__ = add_adapter snake_case__ = adapter_kernel_size snake_case__ = adapter_stride snake_case__ = num_adapter_layers snake_case__ = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. snake_case__ = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. snake_case__ = list(UpperCamelCase__) snake_case__ = list(UpperCamelCase__) snake_case__ = list(UpperCamelCase__) snake_case__ = xvector_output_dim @property def __magic_name__ ( self : Optional[int]): '''simple docstring''' return functools.reduce(operator.mul , self.conv_stride , 1)
654
0
'''simple docstring''' from typing import Dict, List, Optional, Tuple, Union import torch from ...models import AutoencoderKL, TransformeraDModel from ...schedulers import KarrasDiffusionSchedulers from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class SCREAMING_SNAKE_CASE__ ( lowercase_): def __init__( self , A_ , A_ , A_ , A_ = None , )-> List[Any]: '''simple docstring''' super().__init__() self.register_modules(transformer=UpperCamelCase__ , vae=UpperCamelCase__ , scheduler=UpperCamelCase__ ) # create a imagenet -> id dictionary for easier use UpperCamelCase = {} if idalabel is not None: for key, value in idalabel.items(): for label in value.split(',' ): UpperCamelCase = int(UpperCamelCase__ ) UpperCamelCase = dict(sorted(self.labels.items() ) ) def UpperCAmelCase_ ( self , A_ )-> List[str]: '''simple docstring''' if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = list(UpperCamelCase__ ) for l in label: if l not in self.labels: raise ValueError( F'''{l} does not exist. 
Please make sure to select one of the following labels: \n {self.labels}.''' ) return [self.labels[l] for l in label] @torch.no_grad() def __call__( self , A_ , A_ = 4.0 , A_ = None , A_ = 50 , A_ = "pil" , A_ = True , )-> Dict: '''simple docstring''' UpperCamelCase = len(UpperCamelCase__ ) UpperCamelCase = self.transformer.config.sample_size UpperCamelCase = self.transformer.config.in_channels UpperCamelCase = randn_tensor( shape=(batch_size, latent_channels, latent_size, latent_size) , generator=UpperCamelCase__ , device=self.device , dtype=self.transformer.dtype , ) UpperCamelCase = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents UpperCamelCase = torch.tensor(UpperCamelCase__ , device=self.device ).reshape(-1 ) UpperCamelCase = torch.tensor([1000] * batch_size , device=self.device ) UpperCamelCase = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels # set step values self.scheduler.set_timesteps(UpperCamelCase__ ) for t in self.progress_bar(self.scheduler.timesteps ): if guidance_scale > 1: UpperCamelCase = latent_model_input[: len(UpperCamelCase__ ) // 2] UpperCamelCase = torch.cat([half, half] , dim=0 ) UpperCamelCase = self.scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ ) UpperCamelCase = t if not torch.is_tensor(UpperCamelCase__ ): # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can # This would be a good case for the `match` statement (Python 3.10+) UpperCamelCase = latent_model_input.device.type == 'mps' if isinstance(UpperCamelCase__ , UpperCamelCase__ ): UpperCamelCase = torch.floataa if is_mps else torch.floataa else: UpperCamelCase = torch.intaa if is_mps else torch.intaa UpperCamelCase = torch.tensor([timesteps] , dtype=UpperCamelCase__ , device=latent_model_input.device ) elif len(timesteps.shape ) == 0: UpperCamelCase = timesteps[None].to(latent_model_input.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCamelCase = timesteps.expand(latent_model_input.shape[0] ) # predict noise model_output UpperCamelCase = self.transformer( UpperCamelCase__ , timestep=UpperCamelCase__ , class_labels=UpperCamelCase__ ).sample # perform guidance if guidance_scale > 1: UpperCamelCase , UpperCamelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:] UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , len(UpperCamelCase__ ) // 2 , dim=0 ) UpperCamelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps) UpperCamelCase = torch.cat([half_eps, half_eps] , dim=0 ) UpperCamelCase = torch.cat([eps, rest] , dim=1 ) # learned sigma if self.transformer.config.out_channels // 2 == latent_channels: UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , UpperCamelCase__ , dim=1 ) else: UpperCamelCase = noise_pred # compute previous image: x_t -> x_t-1 UpperCamelCase = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample if guidance_scale > 1: UpperCamelCase , UpperCamelCase = latent_model_input.chunk(2 , dim=0 ) else: UpperCamelCase = latent_model_input UpperCamelCase = 1 / self.vae.config.scaling_factor * latents UpperCamelCase = self.vae.decode(UpperCamelCase__ ).sample UpperCamelCase = (samples / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is 
compatible with bfloat16 UpperCamelCase = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCamelCase = self.numpy_to_pil(UpperCamelCase__ ) if not return_dict: return (samples,) return ImagePipelineOutput(images=UpperCamelCase__ )
3
from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : UNetaDModel _lowercase : ScoreSdeVeScheduler def __init__( self : Union[str, Any] , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : ScoreSdeVeScheduler): '''simple docstring''' super().__init__() self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__) @torch.no_grad() def __call__( self : Union[str, Any] , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 2_0_0_0 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , **UpperCamelCase__ : List[str] , ): '''simple docstring''' snake_case__ = self.unet.config.sample_size snake_case__ = (batch_size, 3, img_size, img_size) snake_case__ = self.unet snake_case__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__) * self.scheduler.init_noise_sigma snake_case__ = sample.to(self.device) self.scheduler.set_timesteps(UpperCamelCase__) self.scheduler.set_sigmas(UpperCamelCase__) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): snake_case__ = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device) # correction step for _ in range(self.scheduler.config.correct_steps): snake_case__ = self.unet(UpperCamelCase__ , UpperCamelCase__).sample snake_case__ = self.scheduler.step_correct(UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__).prev_sample # prediction step snake_case__ = model(UpperCamelCase__ , UpperCamelCase__).sample snake_case__ = self.scheduler.step_pred(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__) snake_case__ , snake_case__ = output.prev_sample, output.prev_sample_mean snake_case__ = 
sample_mean.clamp(0 , 1) snake_case__ = sample.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": snake_case__ = self.numpy_to_pil(UpperCamelCase__) if not return_dict: return (sample,) return ImagePipelineOutput(images=UpperCamelCase__)
654
0
"""simple docstring""" import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def UpperCAmelCase ( snake_case : Optional[int] ): _lowerCAmelCase:Optional[int] = fname.split(os.path.sep )[-1] return re.search(R'''^(.*)_\d+\.jpg$''' , snake_case ).groups()[0] class a__ ( lowercase_ ): def __init__( self : List[str] ,a__ : List[str] ,a__ : Dict=None ,a__ : List[str]=None) -> List[Any]: """simple docstring""" _lowerCAmelCase:str = file_names _lowerCAmelCase:Optional[Any] = image_transform _lowerCAmelCase:Optional[Any] = label_to_id def __len__( self : Optional[Any]) -> Dict: """simple docstring""" return len(self.file_names) def __getitem__( self : Optional[Any] ,a__ : List[str]) -> str: """simple docstring""" _lowerCAmelCase:Dict = self.file_names[idx] _lowerCAmelCase:Tuple = PIL.Image.open(UpperCamelCase__) _lowerCAmelCase:List[Any] = raw_image.convert('''RGB''') if self.image_transform is not None: _lowerCAmelCase:List[Any] = self.image_transform(UpperCamelCase__) _lowerCAmelCase:int = extract_label(UpperCamelCase__) if self.label_to_id is not None: _lowerCAmelCase:Union[str, Any] = self.label_to_id[label] return {"image": image, "label": label} def UpperCAmelCase ( snake_case : str , snake_case : List[str] ): # Initialize accelerator if args.with_tracking: _lowerCAmelCase:Union[str, Any] = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _lowerCAmelCase:List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _lowerCAmelCase:List[Any] = config['''lr'''] _lowerCAmelCase:List[str] = int(config['''num_epochs'''] ) 
_lowerCAmelCase:Dict = int(config['''seed'''] ) _lowerCAmelCase:Optional[int] = int(config['''batch_size'''] ) _lowerCAmelCase:Optional[Any] = config['''image_size'''] if not isinstance(snake_case , (list, tuple) ): _lowerCAmelCase:Optional[Any] = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , '''isdigit''' ): if args.checkpointing_steps == "epoch": _lowerCAmelCase:List[str] = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): _lowerCAmelCase:Any = int(args.checkpointing_steps ) else: raise ValueError( F'Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.' ) else: _lowerCAmelCase:Optional[int] = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: _lowerCAmelCase:Union[str, Any] = os.path.split(snake_case )[-1].split('''.''' )[0] accelerator.init_trackers(snake_case , snake_case ) # Grab all the image filenames _lowerCAmelCase:Tuple = [os.path.join(args.data_dir , snake_case ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )] # Build the label correspondences _lowerCAmelCase:Optional[Any] = [extract_label(snake_case ) for fname in file_names] _lowerCAmelCase:int = list(set(snake_case ) ) id_to_label.sort() _lowerCAmelCase:List[Any] = {lbl: i for i, lbl in enumerate(snake_case )} # Set the seed before splitting the data. 
np.random.seed(snake_case ) torch.manual_seed(snake_case ) torch.cuda.manual_seed_all(snake_case ) # Split our filenames between train and validation _lowerCAmelCase:Union[str, Any] = np.random.permutation(len(snake_case ) ) _lowerCAmelCase:Any = int(0.8 * len(snake_case ) ) _lowerCAmelCase:List[Any] = random_perm[:cut] _lowerCAmelCase:List[str] = random_perm[cut:] # For training we use a simple RandomResizedCrop _lowerCAmelCase:int = Compose([RandomResizedCrop(snake_case , scale=(0.5, 1.0) ), ToTensor()] ) _lowerCAmelCase:List[Any] = PetsDataset( [file_names[i] for i in train_split] , image_transform=snake_case , label_to_id=snake_case ) # For evaluation, we use a deterministic Resize _lowerCAmelCase:Dict = Compose([Resize(snake_case ), ToTensor()] ) _lowerCAmelCase:Optional[int] = PetsDataset([file_names[i] for i in eval_split] , image_transform=snake_case , label_to_id=snake_case ) # Instantiate dataloaders. _lowerCAmelCase:List[str] = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) _lowerCAmelCase:Optional[Any] = DataLoader(snake_case , shuffle=snake_case , batch_size=snake_case , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _lowerCAmelCase:int = create_model('''resnet50d''' , pretrained=snake_case , num_classes=len(snake_case ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _lowerCAmelCase:str = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): _lowerCAmelCase:List[str] = False for param in model.get_classifier().parameters(): _lowerCAmelCase:Any = True # We normalize the batches of images to be a bit faster. 
_lowerCAmelCase:Dict = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device ) _lowerCAmelCase:Dict = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer _lowerCAmelCase:List[Any] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler _lowerCAmelCase:List[Any] = OneCycleLR(optimizer=snake_case , max_lr=snake_case , epochs=snake_case , steps_per_epoch=len(snake_case ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase:Optional[Any] = accelerator.prepare( snake_case , snake_case , snake_case , snake_case , snake_case ) # We need to keep track of how many total steps we have iterated over _lowerCAmelCase:Any = 0 # We also need to keep track of the starting epoch so files are named properly _lowerCAmelCase:Dict = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(F'Resumed from checkpoint: {args.resume_from_checkpoint}' ) accelerator.load_state(args.resume_from_checkpoint ) _lowerCAmelCase:Dict = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint _lowerCAmelCase:List[str] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) _lowerCAmelCase:int = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` _lowerCAmelCase:int = os.path.splitext(snake_case )[0] if "epoch" in training_difference: _lowerCAmelCase:Optional[Any] = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1 _lowerCAmelCase:List[str] = None else: _lowerCAmelCase:Optional[int] = 
int(training_difference.replace('''step_''' , '''''' ) ) _lowerCAmelCase:Dict = resume_step // len(snake_case ) resume_step -= starting_epoch * len(snake_case ) # Now we train the model for epoch in range(snake_case , snake_case ): model.train() if args.with_tracking: _lowerCAmelCase:str = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step _lowerCAmelCase:List[Any] = accelerator.skip_first_batches(snake_case , snake_case ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader _lowerCAmelCase:int = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. _lowerCAmelCase:Union[str, Any] = {k: v.to(accelerator.device ) for k, v in batch.items()} _lowerCAmelCase:int = (batch['''image'''] - mean) / std _lowerCAmelCase:Union[str, Any] = model(snake_case ) _lowerCAmelCase:List[str] = torch.nn.functional.cross_entropy(snake_case , batch['''label'''] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(snake_case ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(snake_case , snake_case ): _lowerCAmelCase:Dict = F'step_{overall_step}' if overall_step % checkpointing_steps == 0: if args.output_dir is not None: _lowerCAmelCase:Dict = os.path.join(args.output_dir , snake_case ) accelerator.save_state(snake_case ) model.eval() _lowerCAmelCase:Any = 0 _lowerCAmelCase:Dict = 0 for step, batch in enumerate(snake_case ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
_lowerCAmelCase:int = {k: v.to(accelerator.device ) for k, v in batch.items()} _lowerCAmelCase:Optional[Any] = (batch['''image'''] - mean) / std with torch.no_grad(): _lowerCAmelCase:List[Any] = model(snake_case ) _lowerCAmelCase:Tuple = outputs.argmax(dim=-1 ) _lowerCAmelCase , _lowerCAmelCase:Optional[int] = accelerator.gather_for_metrics((predictions, batch['''label''']) ) _lowerCAmelCase:List[str] = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() _lowerCAmelCase:Tuple = accurate.item() / num_elems # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}: {100 * eval_metric:.2f}' ) if args.with_tracking: accelerator.log( { '''accuracy''': 100 * eval_metric, '''train_loss''': total_loss.item() / len(snake_case ), '''epoch''': epoch, } , step=snake_case , ) if checkpointing_steps == "epoch": _lowerCAmelCase:Any = F'epoch_{epoch}' if args.output_dir is not None: _lowerCAmelCase:Optional[Any] = os.path.join(args.output_dir , snake_case ) accelerator.save_state(snake_case ) if args.with_tracking: accelerator.end_training() def UpperCAmelCase ( ): _lowerCAmelCase:Union[str, Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument('''--data_dir''' , required=snake_case , help='''The data folder on disk.''' ) parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' ) parser.add_argument( '''--mixed_precision''' , type=snake_case , default=snake_case , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--checkpointing_steps''' , type=snake_case , default=snake_case , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , ) parser.add_argument( '''--output_dir''' , type=snake_case , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=snake_case , default=snake_case , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=snake_case , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _lowerCAmelCase:Optional[Any] = parser.parse_args() _lowerCAmelCase:List[str] = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 224} training_function(snake_case , snake_case ) if __name__ == "__main__": main()
227
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


# NOTE(review): this file is machine-obfuscated — the class was renamed
# `_lowerCAmelCase`, its bases to `lowercase_`, every test method to
# `__magic_name__`, and several parameter names collapsed to the same
# identifier (which is a SyntaxError). Comments below describe intent; the
# original (de-obfuscated) names should be restored before running.
@skip_mps
class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Fast (dummy-component) tests for ``IFInpaintingSuperResolutionPipeline``.

    Mixes the generic ``PipelineTesterMixin`` checks with the IF-specific
    tester mixin; ``skip_mps`` disables the whole suite on Apple MPS.
    """

    # Pipeline under test, plus the parameter sets the shared tester mixin
    # iterates over (super-resolution takes no width/height; batching also
    # varies `original_image`; `latents` is not an optional param here).
    _lowercase : Optional[int] = IFInpaintingSuperResolutionPipeline
    _lowercase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    _lowercase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    _lowercase : int = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def __magic_name__ ( self : Union[str, Any]):
        """Build the tiny dummy super-resolution components (delegates to the IF mixin)."""
        return self._get_superresolution_dummy_components()

    def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[int]=0):
        """Return small seeded pipeline inputs: 16x16 image, 32x32 original/mask.

        NOTE(review): the two parameters were originally distinct (device, seed);
        obfuscation collapsed them to one name.
        """
        # torch.Generator(device="mps") is unsupported, hence the global-seed fallback.
        if str(UpperCamelCase__).startswith("""mps"""):
            snake_case__ = torch.manual_seed(UpperCamelCase__)
        else:
            snake_case__ = torch.Generator(device=UpperCamelCase__).manual_seed(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(UpperCamelCase__)).to(UpperCamelCase__)
        snake_case__ = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """original_image""": original_image,
            """mask_image""": mask_image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """output_type""": """numpy""",
        }
        return inputs

    @unittest.skipIf(
        torch_device != """cuda"""
        or not is_xformers_available() ,
        reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,
    )
    def __magic_name__ ( self : Dict):
        """xformers memory-efficient attention must match default attention within 1e-3."""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def __magic_name__ ( self : int):
        """Save/load round-trip must tolerate optional components being None."""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""")
    def __magic_name__ ( self : Optional[Any]):
        """fp16 save/load round-trip, with a loose (1e-1) tolerance."""
        super().test_save_load_floataa(expected_max_diff=1E-1)

    def __magic_name__ ( self : List[Any]):
        """Attention slicing must not change outputs beyond 1e-2."""
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def __magic_name__ ( self : Union[str, Any]):
        """Local (directory) save/load round-trip."""
        self._test_save_load_local()

    def __magic_name__ ( self : str):
        """A batch of one must produce the same result as an unbatched call."""
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
654
0
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def UpperCAmelCase__ ( lowerCAmelCase__ :Any="ro" , lowerCAmelCase__ :Optional[Any]="en" , lowerCAmelCase__ :Any="wmt16" , lowerCAmelCase__ :Optional[Any]=None ) -> Dict: '''simple docstring''' try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("""run pip install datasets""" ) lowercase = f'{src_lang}-{tgt_lang}' print(f'Converting {dataset}-{pair}' ) lowercase = datasets.load_dataset(lowerCAmelCase__ , lowerCAmelCase__ ) if save_dir is None: lowercase = f'{dataset}-{pair}' lowercase = Path(lowerCAmelCase__ ) save_dir.mkdir(exist_ok=lowerCAmelCase__ ) for split in ds.keys(): print(f'Splitting {split} with {ds[split].num_rows} records' ) # to save to val.source, val.target like summary datasets lowercase = """val""" if split == """validation""" else split lowercase = save_dir.joinpath(f'{fn}.source' ) lowercase = save_dir.joinpath(f'{fn}.target' ) lowercase = src_path.open("""w+""" ) lowercase = tgt_path.open("""w+""" ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): lowercase = x["""translation"""] src_fp.write(ex[src_lang] + """\n""" ) tgt_fp.write(ex[tgt_lang] + """\n""" ) print(f'Saved {dataset} dataset to {save_dir}' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
359
"""Project Euler Problem 145: https://projecteuler.net/problem=145

A number n is "reversible" when n + reverse(n) consists entirely of odd digits
(numbers with a leading or trailing zero are excluded). Counts reversible
numbers below 10**max_power combinatorially, digit-pair by digit-pair, instead
of brute force.
"""
# The obfuscated original assigned BOTH lists to the same name (`a__`) while
# the code below reads EVEN_DIGITS / ODD_DIGITS — restored here.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """Count reversible numbers of exactly `length` digits.

    `digits` is filled from the outside in; `remaining_length` digits are still
    unset and `remainder` carries the partial digit-pair sums chosen so far
    (divided by 10 at each step, i.e. the carry into the next pair).
    """
    if remaining_length == 0:
        # All digits chosen: reject a leading/trailing zero, then verify every
        # pairwise sum (with carries) is odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself (2 * digit, always
        # even), so the running remainder must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    # General case: pick the outer pair. The two digits must have opposite
    # parity relative to the current remainder so that their sum is odd.
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return the count of reversible numbers below 10**max_power.

    >>> solution(3)
    120
    """
    return sum(
        reversible_numbers(length, 0, [0] * length, length)
        for length in range(1, max_power + 1)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
654
0
'''Tests for the hub file-caching utilities in ``transformers.utils``
(`cached_file`, `get_file_from_repo`, `has_file`).

NOTE(review): this file is machine-obfuscated — every method was renamed to
``UpperCamelCase`` (later defs shadow earlier ones), the three module
constants were all assigned to ``A_``, and most call arguments were replaced
by the undefined name ``UpperCamelCase__``. Comments describe the apparent
intent; the de-obfuscated names must be restored before these tests can run.
'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path

from requests.exceptions import HTTPError

from transformers.utils import (
    CONFIG_NAME,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TRANSFORMERS_CACHE,
    WEIGHTS_NAME,
    cached_file,
    get_file_from_repo,
    has_file,
)


# NOTE(review): originally three distinct constants (repo id, its cache
# folder, a full commit sha); obfuscation collapsed them all onto `A_`.
A_ = "hf-internal-testing/tiny-random-bert"
A_ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
A_ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"


class UpperCAmelCase ( unittest.TestCase ):
    '''Integration tests for hub file caching (require network access).'''

    def UpperCamelCase( self ) -> None:
        '''Download a file, then check the cache layout and that the second
        fetch (and a short-revision fetch) resolve to the same cached path.'''
        lowerCamelCase_ = cached_file(UpperCamelCase__ , UpperCamelCase__ )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(UpperCamelCase__ ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) )
        with open(os.path.join(UpperCamelCase__ , 'refs' , 'main' ) ) as f:
            lowerCamelCase_ = f.read()
        self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , 'snapshots' , UpperCamelCase__ , UpperCamelCase__ ) )
        self.assertTrue(os.path.isfile(UpperCamelCase__ ) )
        # File is cached at the same place the second time.
        lowerCamelCase_ = cached_file(UpperCamelCase__ , UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
        # Using a specific revision to test the full commit hash.
        lowerCamelCase_ = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision='9b8c223' )
        self.assertEqual(UpperCamelCase__ , os.path.join(UpperCamelCase__ , 'snapshots' , UpperCamelCase__ , UpperCamelCase__ ) )

    def UpperCamelCase( self ) -> None:
        '''Invalid repo id, invalid revision and missing filename must raise
        with the documented error messages.'''
        with self.assertRaisesRegex(UpperCamelCase__ , 'is not a valid model identifier' ):
            lowerCamelCase_ = cached_file('tiny-random-bert' , UpperCamelCase__ )
        with self.assertRaisesRegex(UpperCamelCase__ , 'is not a valid git identifier' ):
            lowerCamelCase_ = cached_file(UpperCamelCase__ , UpperCamelCase__ , revision='aaaa' )
        with self.assertRaisesRegex(UpperCamelCase__ , 'does not appear to have a file named' ):
            lowerCamelCase_ = cached_file(UpperCamelCase__ , 'conf' )

    def UpperCamelCase( self ) -> None:
        '''Missing files: `.no_exist` marker is written, the soft-failure flags
        return None, and connection errors (mocked 500) also return None.'''
        with self.assertRaisesRegex(UpperCamelCase__ , 'does not appear to have a file named' ):
            lowerCamelCase_ = cached_file(UpperCamelCase__ , 'conf' )
        with open(os.path.join(UpperCamelCase__ , 'refs' , 'main' ) ) as f:
            lowerCamelCase_ = f.read()
        self.assertTrue(os.path.isfile(os.path.join(UpperCamelCase__ , '.no_exist' , UpperCamelCase__ , 'conf' ) ) )
        lowerCamelCase_ = cached_file(UpperCamelCase__ , 'conf' , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
        self.assertIsNone(UpperCamelCase__ )
        lowerCamelCase_ = cached_file(UpperCamelCase__ , 'conf' , local_files_only=UpperCamelCase__ , _raise_exceptions_for_missing_entries=UpperCamelCase__ )
        self.assertIsNone(UpperCamelCase__ )
        # Build a fake 500 response object for the mocked HEAD request.
        lowerCamelCase_ = mock.Mock()
        lowerCamelCase_ = 500
        lowerCamelCase_ = {}
        lowerCamelCase_ = HTTPError
        lowerCamelCase_ = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=UpperCamelCase__ ) as mock_head:
            lowerCamelCase_ = cached_file(UpperCamelCase__ , 'conf' , _raise_exceptions_for_connection_errors=UpperCamelCase__ )
            self.assertIsNone(UpperCamelCase__ )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase( self ) -> None:
        '''`has_file` on a PyTorch-only repo: True for the torch weight name,
        False for the TF2/Flax ones (obfuscation dropped the constants).'''
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , UpperCamelCase__ ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , UpperCamelCase__ ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , UpperCamelCase__ ) )

    def UpperCamelCase( self ) -> None:
        '''`get_file_from_repo` returns None for missing files, raises for bad
        repo/revision, and returns a readable path for an existing config.'''
        self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(UpperCamelCase__ , 'is not a valid model identifier' ):
            get_file_from_repo('bert-base-case' , UpperCamelCase__ )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(UpperCamelCase__ , 'is not a valid git identifier' ):
            get_file_from_repo('bert-base-cased' , UpperCamelCase__ , revision='ahaha' )
        lowerCamelCase_ = get_file_from_repo('bert-base-cased' , UpperCamelCase__ )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        lowerCamelCase_ = json.loads(open(UpperCamelCase__ , 'r' ).read() )
        self.assertEqual(config['hidden_size'] , 768 )

    def UpperCamelCase( self ) -> None:
        '''A plain local directory also works as a "repo" for get_file_from_repo.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCamelCase_ = Path(UpperCamelCase__ ) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(UpperCamelCase__ , 'a.txt' ) , str(UpperCamelCase__ ) )
            self.assertIsNone(get_file_from_repo(UpperCamelCase__ , 'b.txt' ) )
42
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool


# Plain-English language name -> FLORES-200 code, as used by the NLLB models.
a__ = {
    """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""",
    """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""",
    """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""",
    """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""",
    """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""",
    """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""",
    """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""",
    """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""",
    """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""",
    """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""",
    """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""",
    """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""",
    """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""",
    """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""",
    """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""",
    """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""",
    """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""",
    """Catalan""": """cat_Latn""", """Cebuano""": """ceb_Latn""",
    """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""",
    """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""",
    """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""",
    """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""",
    """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""",
    """Greek""": """ell_Grek""", """English""": """eng_Latn""",
    """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""",
    """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""",
    """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""",
    """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""",
    """French""": """fra_Latn""", """Friulian""": """fur_Latn""",
    """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""",
    """Irish""": """gle_Latn""", """Galician""": """glg_Latn""",
    """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""",
    """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""",
    """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""",
    """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""",
    """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""",
    """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""",
    """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""",
    """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""",
    """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""",
    """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""",
    """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""",
    """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""",
    """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""",
    """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""",
    """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""",
    """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""",
    """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""",
    """Northern Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""",
    """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""",
    """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""",
    """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""",
    """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""",
    """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""",
    """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""",
    """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""",
    """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""",
    """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""",
    """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""",
    """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""",
    """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""",
    """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""",
    """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""",
    """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""",
    """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""",
    """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""",
    """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""",
    """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""",
    """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""",
    """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""",
    """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""",
    """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""",
    """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""",
    """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""",
    """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""",
    """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""",
    """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""",
    """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""",
    """Samoan""": """smo_Latn""", """Shona""": """sna_Latn""",
    """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""",
    """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""",
    """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""",
    """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""",
    """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""",
    """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""",
    """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""",
    """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""",
    """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""",
    """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""",
    """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""",
    """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""",
    """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""",
    """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""",
    """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""",
    """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""",
    """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""",
    """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""",
    """Waray""": """war_Latn""", """Wolof""": """wol_Latn""",
    """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""",
    """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""",
    """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""",
    """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""",
}


# NOTE(review): obfuscation renamed the class, its base (`lowercase_`, which is
# `PipelineTool` per the import above), every class attribute to `_lowercase`
# (later assignments shadow earlier ones), and the method/parameter names.
class _lowerCAmelCase ( lowercase_ ):
    """Agent tool wrapping ``facebook/nllb-200-distilled-600M`` for translation.

    Takes `text`, `src_lang` and `tgt_lang` (plain-English language names) and
    returns the text translated into the target language.
    """

    _lowercase : List[str] = '''facebook/nllb-200-distilled-600M'''
    _lowercase : List[Any] = (
        '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '''
        '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '''
        '''which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '''
        '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'''
    )
    _lowercase : Optional[int] = '''translator'''
    _lowercase : Optional[Any] = AutoTokenizer
    _lowercase : Dict = AutoModelForSeqaSeqLM
    _lowercase : List[str] = LANGUAGE_CODES
    _lowercase : Optional[Any] = ['''text''', '''text''', '''text''']
    _lowercase : Tuple = ['''text''']

    def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int):
        """Validate the language names, map them to NLLB codes, and tokenize.

        NOTE(review): the body reads `src_lang` / `tgt_lang`, which were the
        original parameter names before obfuscation collapsed the signature.
        """
        if src_lang not in self.lang_to_code:
            raise ValueError(F'''{src_lang} is not a supported language.''')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(F'''{tgt_lang} is not a supported language.''')
        snake_case__ = self.lang_to_code[src_lang]
        snake_case__ = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            UpperCamelCase__ , return_tensors="""pt""" , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__)

    def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict):
        """Run generation on the tokenized inputs."""
        return self.model.generate(**UpperCamelCase__)

    def __magic_name__ ( self : List[str] , UpperCamelCase__ : Dict):
        """Decode the first generated sequence back to text."""
        return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase__)
654
0
"""Project Euler Problem 99: https://projecteuler.net/problem=99

Given lines of "base,exponent" pairs, find the line with the greatest value
base**exponent. Comparing x * log10(a) avoids computing astronomically large
integers.
"""
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Return the 1-based line number holding the greatest base**exponent.

    Fixes from the obfuscated original: ``from math import logaa`` (no such
    name — restored to ``log10``), the ``__main__`` block calling an undefined
    ``solution``, a leaked file handle, and a path join that duplicated the
    directory for any path containing one. Absolute paths are used as-is;
    bare filenames resolve next to this script (backward-compatible).
    """
    if os.path.isabs(data_file):
        path = data_file
    else:
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)), data_file)

    largest: float = 0
    result = 0
    with open(path) as f:
        for i, line in enumerate(f):
            base, exponent = map(int, line.split(","))
            # log10(base**exponent) = exponent * log10(base): monotone, cheap.
            value = exponent * log10(base)
            if value > largest:
                largest = value
                result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
623
# Tests for 4-bit bitsandbytes quantization of transformers models
# (requires bitsandbytes + accelerate + a CUDA GPU; all tests are @slow).
#
# NOTE(review): this file has been machine-obfuscated. Identifiers were
# collapsed (`snake_case__`, `UpperCamelCase__`, `_lowerCAmelCase`,
# `_lowercase`, `__magic_name__`), so many statements read names that are
# never bound, several `def`s repeat the same parameter name (a SyntaxError),
# and typing names (Optional, Dict, ...) are used in annotations without
# being imported. The code below is preserved byte-for-byte; the comments
# flag the spots that cannot run as written.
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeqaSeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def _UpperCAmelCase(a: Optional[int]):
    # Return the first MLP linear layer of the model (gpt2 uses a different
    # attribute layout than bloom-style models).
    # NOTE(review): parameter is named `a` but the body reads `model` —
    # NameError as written; presumably the parameter should be `model`.
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class _lowerCAmelCase(nn.Module):
        """simple docstring"""

        # Minimal LoRA-style adapter: wraps a linear module and adds a
        # trainable low-rank bypass initialised per the LoRA paper scaling.
        # NOTE(review): both parameters are named `UpperCamelCase__`
        # (duplicate argument name = SyntaxError); originally (module, rank).
        def __init__(self: int, UpperCamelCase__: nn.Module, UpperCamelCase__: int):
            '''simple docstring'''
            super().__init__()
            # NOTE(review): all targets below were collapsed to `snake_case__`;
            # later code expects attributes `self.module` / `self.adapter`.
            snake_case__ = module
            snake_case__ = nn.Sequential(
                nn.Linear(module.in_features, UpperCamelCase__, bias=UpperCamelCase__),
                nn.Linear(UpperCamelCase__, module.out_features, bias=UpperCamelCase__),
            )
            snake_case__ = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=UpperCamelCase__)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def __magic_name__(self: Tuple, UpperCamelCase__: int, *UpperCamelCase__: Dict, **UpperCamelCase__: str):
            '''simple docstring'''
            # forward(): wrapped module output plus the adapter bypass.
            return self.module(UpperCamelCase__, *UpperCamelCase__, **UpperCamelCase__) + self.adapter(UpperCamelCase__)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase(unittest.TestCase):
    """simple docstring"""

    # Base fixture shared by the 4-bit tests: model id, expected fp16/4-bit
    # memory ratio, prompt, and the set of acceptable generations.
    # NOTE(review): all five class attributes were renamed to `_lowercase`
    # (each rebinding the previous); `EXPECTED_OUTPUTS` is read before it is
    # ever bound under that name.
    _lowercase: Dict = '''bigscience/bloom-1b7'''

    # Constant values
    _lowercase: Any = 2.109_6595_5269_2574

    _lowercase: Tuple = '''Hello my name is'''
    _lowercase: List[Any] = set()
    EXPECTED_OUTPUTS.add('''Hello my name is John and I am a professional photographer. I''')
    EXPECTED_OUTPUTS.add('''Hello my name is John.\nI am a friend of your father.\n''')
    EXPECTED_OUTPUTS.add('''Hello my name is John Doe, I am a student at the University''')
    _lowercase: List[str] = 10

    def __magic_name__(self: Optional[int]):
        '''simple docstring'''
        # setUp: load the shared tokenizer.
        snake_case__ = AutoTokenizer.from_pretrained(self.model_name)


class _lowerCAmelCase(lowercase_):
    """simple docstring"""

    # Core 4-bit loading tests (config round-trip, memory footprint,
    # generation, forbidden casts / save).

    def __magic_name__(self: str):
        '''simple docstring'''
        # setUp: load fp16 and 4-bit variants of the same checkpoint.
        super().setUp()
        # Models and tokenizer
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map="""auto""")
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=UpperCamelCase__, device_map="""auto""")

    def __magic_name__(self: Tuple):
        '''simple docstring'''
        # tearDown: free GPU memory between tests.
        del self.model_fpaa
        del self.model_abit
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__(self: str):
        '''simple docstring'''
        # The quantized model must expose a serialisable quantization_config.
        snake_case__ = self.model_abit.config
        self.assertTrue(hasattr(UpperCamelCase__, """quantization_config"""))
        snake_case__ = config.to_dict()
        snake_case__ = config.to_diff_dict()
        snake_case__ = config.to_json_string()

    def __magic_name__(self: Dict):
        '''simple docstring'''
        # Memory footprint ratio fp16/4-bit must match the expected constant,
        # and linear weights must be bitsandbytes 4-bit params.
        from bitsandbytes.nn import Paramsabit

        snake_case__ = self.model_fpaa.get_memory_footprint()
        snake_case__ = self.model_abit.get_memory_footprint()
        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        snake_case__ = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)

    def __magic_name__(self: Optional[int]):
        '''simple docstring'''
        # Every linear except lm_head / keep-in-fp32 modules stores uint8.
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()
        for name, module in self.model_abit.named_modules():
            if isinstance(UpperCamelCase__, torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)

    def __magic_name__(self: Dict):
        '''simple docstring'''
        # Quantized generation must produce one of the expected outputs.
        snake_case__ = self.tokenizer(self.input_text, return_tensors="""pt""")
        snake_case__ = self.model_abit.generate(input_ids=encoded_input["""input_ids"""].to(0), max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=UpperCamelCase__), self.EXPECTED_OUTPUTS)

    def __magic_name__(self: str):
        '''simple docstring'''
        # Loading via an explicit BitsAndBytesConfig must behave identically.
        snake_case__ = BitsAndBytesConfig()
        snake_case__ = True
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=UpperCamelCase__, device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text, return_tensors="""pt""")
        snake_case__ = model_abit_from_config.generate(
            input_ids=encoded_input["""input_ids"""].to(0), max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=UpperCamelCase__), self.EXPECTED_OUTPUTS)

    def __magic_name__(self: Optional[int]):
        '''simple docstring'''
        # Saving a 4-bit model is unsupported and must raise.
        with self.assertRaises(UpperCamelCase__), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(UpperCamelCase__)

    def __magic_name__(self: List[str]):
        '''simple docstring'''
        # Passing both a quantization_config and kwargs must raise.
        snake_case__ = BitsAndBytesConfig()
        with self.assertRaises(UpperCamelCase__):
            snake_case__ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=UpperCamelCase__,
                load_in_abit=UpperCamelCase__,
                device_map="""auto""",
                bnb_abit_quant_type="""nf4""",
            )

    def __magic_name__(self: List[Any]):
        '''simple docstring'''
        # Device / dtype conversion of a quantized model must raise, while the
        # fp16 model remains freely convertible.
        with self.assertRaises(UpperCamelCase__):
            # Tries with `str`
            self.model_abit.to("""cpu""")
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.to(torch.device("""cuda:0"""))
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.float()
        with self.assertRaises(UpperCamelCase__):
            # Tries with a `device`
            self.model_abit.half()
        # Test if we did not break anything
        snake_case__ = self.tokenizer(self.input_text, return_tensors="""pt""")
        snake_case__ = self.model_fpaa.to(torch.floataa)
        snake_case__ = self.model_fpaa.generate(input_ids=encoded_input["""input_ids"""].to(0), max_new_tokens=1_0)
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.to("""cpu""")
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.half()
        # Check this does not throw an error
        snake_case__ = self.model_fpaa.float()

    def __magic_name__(self: Dict):
        '''simple docstring'''
        # T5 keep-in-fp32 modules must stay float32 after 4-bit loading.
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained("""t5-small""", load_in_abit=UpperCamelCase__, device_map="""auto""")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class _lowerCAmelCase(unittest.TestCase):
    """simple docstring"""

    # T5-specific 4-bit tests (with and without _keep_in_fp32_modules).

    @classmethod
    def __magic_name__(cls: Optional[Any]):
        '''simple docstring'''
        # setUpClass: model ids, tokenizer and prompt shared by the tests.
        snake_case__ = """t5-small"""
        snake_case__ = """google/flan-t5-small"""  # flan-t5 uses dense-act instead of dense-relu-dense
        snake_case__ = AutoTokenizer.from_pretrained(cls.model_name)
        snake_case__ = """Translate in German: Hello, my dog is cute"""

    def __magic_name__(self: Optional[int]):
        '''simple docstring'''
        # tearDown: reclaim GPU memory.
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__(self: Any):
        '''simple docstring'''
        # Inference must work even with _keep_in_fp32_modules disabled.
        from transformers import TaForConditionalGeneration

        snake_case__ = TaForConditionalGeneration._keep_in_fpaa_modules
        snake_case__ = None
        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=UpperCamelCase__, device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text, return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=UpperCamelCase__, device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text, return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        snake_case__ = modules

    def __magic_name__(self: Union[str, Any]):
        '''simple docstring'''
        # Decoder attention projections must be bnb 4-bit Linear layers.
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=UpperCamelCase__, device_map="""auto""")
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit))
        snake_case__ = self.tokenizer(self.input_text, return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)
        # test with `flan-t5-small`
        snake_case__ = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=UpperCamelCase__, device_map="""auto""")
        snake_case__ = self.tokenizer(self.input_text, return_tensors="""pt""").to(0)
        snake_case__ = model.generate(**UpperCamelCase__)


class _lowerCAmelCase(lowercase_):
    """simple docstring"""

    # 4-bit loading across model classes (base / seq-cls / causal-LM / seq2seq).

    def __magic_name__(self: int):
        '''simple docstring'''
        # setUp: one 4-bit model per head type.
        super().setUp()
        # model_name
        snake_case__ = """bigscience/bloom-560m"""
        snake_case__ = """t5-small"""
        # Different types of model
        snake_case__ = AutoModel.from_pretrained(self.model_name, load_in_abit=UpperCamelCase__, device_map="""auto""")
        # Sequence classification model
        snake_case__ = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=UpperCamelCase__, device_map="""auto""")
        # CausalLM model
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=UpperCamelCase__, device_map="""auto""")
        # Seq2seq model
        snake_case__ = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=UpperCamelCase__, device_map="""auto""")

    def __magic_name__(self: List[str]):
        '''simple docstring'''
        # tearDown: drop all four models and reclaim GPU memory.
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__(self: Union[str, Any]):
        '''simple docstring'''
        # MLP weights are quantized; task heads stay plain nn.Parameter.
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class _lowerCAmelCase(lowercase_):
    """simple docstring"""

    # 4-bit quantization through the text-generation pipeline.

    def __magic_name__(self: Tuple):
        '''simple docstring'''
        super().setUp()

    def __magic_name__(self: int):
        '''simple docstring'''
        # tearDown: release the pipeline and GPU memory.
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__(self: Tuple):
        '''simple docstring'''
        # Build a 4-bit pipeline and check its generation is acceptable.
        snake_case__ = pipeline(
            """text-generation""",
            model=self.model_name,
            model_kwargs={"""device_map""": """auto""", """load_in_4bit""": True, """torch_dtype""": torch.floataa},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        snake_case__ = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["""generated_text"""], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class _lowerCAmelCase(lowercase_):
    """simple docstring"""

    # 4-bit loading sharded over two GPUs with device_map="balanced".

    def __magic_name__(self: Union[str, Any]):
        '''simple docstring'''
        super().setUp()

    def __magic_name__(self: int):
        '''simple docstring'''
        # The device map must span both GPUs and generation must still work.
        snake_case__ = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=UpperCamelCase__, device_map="""balanced""")
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        snake_case__ = self.tokenizer(self.input_text, return_tensors="""pt""")
        # Second real batch
        snake_case__ = model_parallel.generate(input_ids=encoded_input["""input_ids"""].to(0), max_new_tokens=1_0)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=UpperCamelCase__), self.EXPECTED_OUTPUTS)


class _lowerCAmelCase(lowercase_):
    """simple docstring"""

    # Training smoke test: freeze the 4-bit model, attach LoRA adapters to
    # the OPT attention projections, and check gradients flow to the adapters.

    def __magic_name__(self: Any):
        '''simple docstring'''
        snake_case__ = """facebook/opt-350m"""
        super().setUp()

    def __magic_name__(self: Any):
        '''simple docstring'''
        # Requires bitsandbytes >= 0.37.0 (first version with 4-bit training).
        if version.parse(importlib.metadata.version("""bitsandbytes""")) < version.parse("""0.37.0"""):
            return
        # Step 1: freeze all parameters
        snake_case__ = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=UpperCamelCase__)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            snake_case__ = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                snake_case__ = param.data.to(torch.floataa)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(UpperCamelCase__)):
                snake_case__ = LoRALayer(module.q_proj, rank=1_6)
                snake_case__ = LoRALayer(module.k_proj, rank=1_6)
                snake_case__ = LoRALayer(module.v_proj, rank=1_6)
        # Step 3: dummy batch
        snake_case__ = self.tokenizer("""Test batch """, return_tensors="""pt""").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            snake_case__ = model.forward(**UpperCamelCase__)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(UpperCamelCase__, UpperCamelCase__):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(UpperCamelCase__, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class _lowerCAmelCase(lowercase_):
    """simple docstring"""

    # Same suite rerun against gpt2-xl with its own expected memory ratio.
    _lowercase: List[Any] = '''gpt2-xl'''
    _lowercase: Any = 3.3191_8548_5415_2187
654
0
'''simple docstring'''
# TF XGLM model tests: a config/inputs tester, the common TF model test
# mixin suite, and slow integration tests against facebook/xglm-564M.
#
# NOTE(review): this file has been machine-obfuscated — parameter names are
# collapsed to `UpperCAmelCase__` (duplicated within one signature, which is a
# SyntaxError), locals to `lowercase`, and several statements read names that
# are never bound. Code is preserved byte-for-byte; comments flag the damage.
from __future__ import annotations

import unittest

from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers.models.xglm.modeling_tf_xglm import (
        TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFXGLMForCausalLM,
        TFXGLMModel,
    )


@require_tf
class __SCREAMING_SNAKE_CASE:
    # Model tester: builds small XGLM configs and dummy inputs for the
    # common test suite.
    lowerCamelCase_ = XGLMConfig
    lowerCamelCase_ = {}
    lowerCamelCase_ = '''gelu'''

    # NOTE(review): every parameter below is `UpperCAmelCase__` — duplicate
    # argument names are a SyntaxError; the body reads the original names
    # (parent, batch_size, seq_length, ...), which are unbound as written.
    def __init__(self: str, UpperCAmelCase__: Tuple, UpperCAmelCase__: Any=14, UpperCAmelCase__: str=7, UpperCAmelCase__: Optional[int]=True, UpperCAmelCase__: Tuple=True, UpperCAmelCase__: Tuple=True, UpperCAmelCase__: Tuple=99, UpperCAmelCase__: Union[str, Any]=32, UpperCAmelCase__: List[Any]=2, UpperCAmelCase__: Union[str, Any]=4, UpperCAmelCase__: Any=37, UpperCAmelCase__: Union[str, Any]="gelu", UpperCAmelCase__: List[Any]=0.1, UpperCAmelCase__: List[str]=0.1, UpperCAmelCase__: Any=512, UpperCAmelCase__: List[str]=0.02, ):
        '''simple docstring'''
        lowercase : Tuple = parent
        lowercase : int = batch_size
        lowercase : Optional[Any] = seq_length
        lowercase : List[str] = is_training
        lowercase : Dict = use_input_mask
        lowercase : str = use_labels
        lowercase : List[str] = vocab_size
        lowercase : Optional[int] = d_model
        lowercase : Union[str, Any] = num_hidden_layers
        lowercase : Optional[Any] = num_attention_heads
        lowercase : List[Any] = ffn_dim
        lowercase : List[str] = activation_function
        lowercase : int = activation_dropout
        lowercase : Any = attention_dropout
        lowercase : str = max_position_embeddings
        lowercase : Union[str, Any] = initializer_range
        lowercase : List[Any] = None
        lowercase : Tuple = 0
        lowercase : List[str] = 2
        lowercase : str = 1

    def lowerCamelCase_(self: List[str]):
        '''simple docstring'''
        # Load the reference large config from the hub.
        return XGLMConfig.from_pretrained('''facebook/xglm-564M''')

    def lowerCamelCase_(self: List[str]):
        '''simple docstring'''
        # Build (config, input_ids, input_mask, head_mask) dummy inputs.
        lowercase : Tuple = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3)
        lowercase : int = None
        if self.use_input_mask:
            lowercase : Dict = random_attention_mask([self.batch_size, self.seq_length])
        lowercase : Dict = self.get_config()
        lowercase : Dict = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def lowerCamelCase_(self: Optional[int]):
        '''simple docstring'''
        # Assemble a small XGLMConfig from the tester attributes.
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=UpperCamelCase__,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=UpperCamelCase__,
        )

    def lowerCamelCase_(self: Union[str, Any]):
        '''simple docstring'''
        # Pack the prepared inputs into the dict shape the common tests expect.
        # NOTE(review): an annotated parenthesized-tuple assignment target is
        # a SyntaxError, and `config_and_inputs` is unbound as written.
        lowercase : Optional[Any] = self.prepare_config_and_inputs()
        (
            (lowercase),
            (lowercase),
            (lowercase),
            (lowercase),
        ) : Dict = config_and_inputs
        lowercase : Dict = {
            '''input_ids''': input_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict


@require_tf
class __SCREAMING_SNAKE_CASE(lowercase_, lowercase_, unittest.TestCase):
    # Common TF model test suite wired to the XGLM tester above.
    lowerCamelCase_ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    lowerCamelCase_ = (TFXGLMForCausalLM,) if is_tf_available() else ()
    lowerCamelCase_ = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    lowerCamelCase_ = False
    lowerCamelCase_ = False
    lowerCamelCase_ = False

    def lowerCamelCase_(self: int):
        '''simple docstring'''
        # setUp: instantiate the model tester and the config tester.
        lowercase : Optional[Any] = TFXGLMModelTester(self)
        lowercase : Optional[int] = ConfigTester(self, config_class=UpperCamelCase__, n_embd=37)

    def lowerCamelCase_(self: List[Any]):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    @slow
    def lowerCamelCase_(self: List[str]):
        '''simple docstring'''
        # Smoke-test loading the first pretrained checkpoint from the hub.
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase : int = TFXGLMModel.from_pretrained(UpperCamelCase__)
            self.assertIsNotNone(UpperCamelCase__)

    @unittest.skip(reason='''Currently, model embeddings are going to undergo a major refactor.''')
    def lowerCamelCase_(self: Optional[int]):
        '''simple docstring'''
        super().test_resize_token_embeddings()


@require_tf
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    # Slow integration tests: greedy LM generation, seeded sampling, and
    # batched generation with left padding.

    @slow
    def lowerCamelCase_(self: int, UpperCAmelCase__: List[Any]=True):
        '''simple docstring'''
        # Greedy generation from a fixed prompt must match the golden ids.
        lowercase : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
        lowercase : Any = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.intaa)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        lowercase : Any = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        lowercase : Dict = model.generate(UpperCamelCase__, do_sample=UpperCamelCase__, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), UpperCamelCase__)

    @slow
    def lowerCamelCase_(self: Any):
        '''simple docstring'''
        # Seeded sampling on CPU must reproduce the golden sentence.
        lowercase : List[str] = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
        lowercase : Optional[int] = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
        tf.random.set_seed(0)
        lowercase : Optional[Any] = tokenizer('''Today is a nice day and''', return_tensors='''tf''')
        lowercase : Optional[Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(''':/CPU:0'''):
            lowercase : List[str] = model.generate(UpperCamelCase__, do_sample=UpperCamelCase__, seed=[7, 0])
        lowercase : Union[str, Any] = tokenizer.decode(output_ids[0], skip_special_tokens=UpperCamelCase__)
        lowercase : List[str] = (
            '''Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'''
        )
        self.assertEqual(UpperCamelCase__, UpperCamelCase__)

    @slow
    def lowerCamelCase_(self: Union[str, Any]):
        '''simple docstring'''
        # Left-padded batched generation must match per-sentence generation.
        lowercase : Any = TFXGLMForCausalLM.from_pretrained('''facebook/xglm-564M''')
        lowercase : Any = XGLMTokenizer.from_pretrained('''facebook/xglm-564M''')
        lowercase : Tuple = '''left'''
        # use different length sentences to test batching
        lowercase : Union[str, Any] = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When''',
            '''Hello, my dog is a little''',
        ]
        lowercase : Optional[Any] = tokenizer(UpperCamelCase__, return_tensors='''tf''', padding=UpperCamelCase__)
        lowercase : Any = inputs['''input_ids''']
        lowercase : Dict = model.generate(input_ids=UpperCamelCase__, attention_mask=inputs['''attention_mask'''], max_new_tokens=12)
        lowercase : Dict = tokenizer(sentences[0], return_tensors='''tf''').input_ids
        lowercase : Tuple = model.generate(input_ids=UpperCamelCase__, max_new_tokens=12)
        lowercase : Tuple = tokenizer(sentences[1], return_tensors='''tf''').input_ids
        lowercase : Optional[int] = model.generate(input_ids=UpperCamelCase__, max_new_tokens=12)
        lowercase : str = tokenizer.batch_decode(UpperCamelCase__, skip_special_tokens=UpperCamelCase__)
        lowercase : Any = tokenizer.decode(output_non_padded[0], skip_special_tokens=UpperCamelCase__)
        lowercase : Optional[Any] = tokenizer.decode(output_padded[0], skip_special_tokens=UpperCamelCase__)
        lowercase : int = [
            '''This is an extremelly long sentence that only exists to test the ability of the model to cope with '''
            '''left-padding, such as in batched generation. The output for the sequence below should be the same '''
            '''regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '''
            '''a single''',
            '''Hello, my dog is a little bit of a shy one, but he is very friendly''',
        ]
        self.assertListEqual(UpperCamelCase__, UpperCamelCase__)
        self.assertListEqual(UpperCamelCase__, [non_padded_sentence, padded_sentence])
92
"""Horizontal/vertical flip augmentation for a YOLO-style dataset.

Reads every image + ``*.txt`` label pair, flips the image, mirrors the box
centers accordingly, and writes the augmented copies with a random suffix.

This block repairs several obfuscation artifacts in the original: the four
module constants were all collapsed onto one name (so only the last value
survived), the three pipeline functions shared one name (each ``def``
shadowing the previous, leaving ``main``/``get_dataset``/
``update_image_and_anno`` unbound at the call sites), OpenCV was imported
under the non-existent module name ``cva``, and the output paths carried a
spurious leading ``/`` that pointed at the filesystem root.
"""
import glob
import os
import random
from string import ascii_lowercase, digits

# OpenCV is imported lazily inside the functions that need it, so the pure
# Python helpers (get_dataset, random_chars) stay importable without cv2.

LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Run the full augmentation pipeline over LABEL_DIR / IMG_DIR,
    writing flipped images and labels into OUTPUT_DIR."""
    import cv2

    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        # previously written to f"/{file_root}.jpg" — the filesystem root
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            annos_list.append(f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}")
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect parallel lists of image paths and per-image box lists.

    label_dir: directory holding YOLO ``*.txt`` label files.
    img_dir:   directory holding the matching ``<name>.jpg`` images.
    Each box is ``[class_id, x_center, y_center, width, height]``; images
    with no annotations are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:  # was leaked (no close) before
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [int(obj[0]), float(obj[1]), float(obj[2]), float(obj[3]), float(obj[4])]
            )
        if not boxes:
            continue  # no annotations -> nothing to augment
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    img_list: list, anno_list: list, flip_type: int = 1
) -> tuple[list, list, list]:
    """Flip every image and mirror its annotations.

    flip_type follows ``cv2.flip``: 1 flips horizontally (x centers become
    ``1 - x``), 0 flips vertically (y centers become ``1 - y``).
    Returns (flipped images, flipped annotation lists, original paths).
    """
    import cv2

    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase+digit string used to de-duplicate filenames."""
    if number_char <= 1:  # was an assert, which `python -O` would strip
        raise ValueError("The number of character should greater than 1")
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
654
0
'''simple docstring'''
# Audio feature extractor (TVLT-style): converts raw waveforms into padded
# log-mel spectrogram batches with a patch-aligned attention mask.
#
# NOTE(review): this file has been machine-obfuscated — parameters are
# collapsed to `UpperCamelCase__` (duplicated within a signature, which is a
# SyntaxError), assignment targets to `_UpperCamelCase`, and several
# statements read names that are never bound (spectrogram_length, n_fft,
# raw_speech, audio_features, ...). Code is preserved byte-for-byte; the
# comments flag what each step was doing.
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


__lowerCamelCase : Any = logging.get_logger(__name__)


class UpperCAmelCase(lowercase_):
    """simple docstring"""

    # Output keys produced by __call__.
    lowerCAmelCase_ = ['''audio_values''', '''audio_mask''']

    # Original parameters (per the body): spectrogram_length=2048,
    # num_channels=1, patch_size=[16, 16], feature_size=128,
    # sampling_rate=44100, hop_length_to_sampling_rate=86, n_fft=2048,
    # padding_value=0.0 — all collapsed to `UpperCamelCase__` here.
    def __init__(self: List[str], UpperCamelCase__: Dict=2048, UpperCamelCase__: List[str]=1, UpperCamelCase__: List[str]=[16, 16], UpperCamelCase__: Any=128, UpperCamelCase__: str=4_4100, UpperCamelCase__: Dict=86, UpperCamelCase__: str=2048, UpperCamelCase__: Optional[int]=0.0, **UpperCamelCase__: Any, ) -> str:
        super().__init__(
            feature_size=UpperCamelCase__,
            sampling_rate=UpperCamelCase__,
            padding_value=UpperCamelCase__,
            **UpperCamelCase__,
        )
        _UpperCamelCase = spectrogram_length
        _UpperCamelCase = num_channels
        _UpperCamelCase = patch_size
        # number of frequency patches per frame
        _UpperCamelCase = feature_size // self.patch_size[1]
        _UpperCamelCase = n_fft
        _UpperCamelCase = sampling_rate // hop_length_to_sampling_rate
        _UpperCamelCase = sampling_rate
        _UpperCamelCase = padding_value
        # slaney-normalised mel filter bank, transposed for spectrogram()
        _UpperCamelCase = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=UpperCamelCase__,
            min_frequency=0.0,
            max_frequency=2_2050.0,
            sampling_rate=UpperCamelCase__,
            norm='''slaney''',
            mel_scale='''slaney''',
        ).T

    def UpperCamelCase__(self: str, UpperCamelCase__: np.array) -> List[Any]:
        # Compute a dB-scaled log-mel spectrogram and normalise it into
        # the [-1, 1] range ((x - 20) / 40, clipped to [-2, 0], plus 1).
        _UpperCamelCase = spectrogram(
            UpperCamelCase__,
            window_function(self.n_fft, '''hann'''),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='''dB''',
            db_range=80.0,
        )
        _UpperCamelCase = log_spec[:, :-1]
        _UpperCamelCase = log_spec - 20.0
        _UpperCamelCase = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    # Original parameters (per the body): raw_speech, return_tensors,
    # return_attention_mask=True, sampling_rate, resample=False,
    # mask_audio=False — all collapsed to `UpperCamelCase__` here.
    def __call__(self: Dict, UpperCamelCase__: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], UpperCamelCase__: Optional[Union[str, TensorType]] = None, UpperCamelCase__: Optional[bool] = True, UpperCamelCase__: Optional[int] = None, UpperCamelCase__: bool = False, UpperCamelCase__: bool = False, **UpperCamelCase__: int, ) -> List[str]:
        # Validate the caller-supplied sampling rate against the extractor's.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    F''' with {self.sampling_rate} and not {sampling_rate}.''')
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')
        # Detect batched input (2-D ndarray or list of sequences); only
        # mono-channel audio is accepted.
        _UpperCamelCase = isinstance(UpperCamelCase__, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(F'''Only mono-channel audio is supported for input to {self}''')
        _UpperCamelCase = is_batched_numpy or (
            isinstance(UpperCamelCase__, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            _UpperCamelCase = [np.asarray([speech], dtype=np.floataa).T for speech in raw_speech]
        elif not is_batched and not isinstance(UpperCamelCase__, np.ndarray):
            _UpperCamelCase = np.asarray(UpperCamelCase__, dtype=np.floataa)
        elif isinstance(UpperCamelCase__, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
            _UpperCamelCase = raw_speech.astype(np.floataa)
        # always return batch
        if not is_batched:
            _UpperCamelCase = [np.asarray([raw_speech]).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        _UpperCamelCase = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], UpperCamelCase__):
            _UpperCamelCase = [np.asarray(UpperCamelCase__, dtype=np.floataa) for feature in audio_features]
        # Create audio attention mask
        _UpperCamelCase = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padded patches, per example
            _UpperCamelCase = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            _UpperCamelCase = np.array(UpperCamelCase__).astype(np.floataa)
        # convert into correct format for padding
        _UpperCamelCase = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        # Allocate the padded batch filled with padding_value, then copy each
        # example in along the time axis.
        _UpperCamelCase = np.ones([len(UpperCamelCase__), 1, max_time_len, self.feature_size]).astype(np.floataa)
        _UpperCamelCase = padded_audio_features * self.padding_value
        for i in range(len(UpperCamelCase__)):
            _UpperCamelCase = audio_features[i]
            _UpperCamelCase = feature
        # return as BatchFeature
        if return_attention_mask:
            _UpperCamelCase = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            _UpperCamelCase = {'''audio_values''': padded_audio_features}
        _UpperCamelCase = BatchFeature(data=UpperCamelCase__, tensor_type=UpperCamelCase__)
        return encoded_inputs
404
"""Benchmark `datasets.Dataset.map` / `.filter` throughput.

Runs a series of map/filter calls over a synthetic dataset and writes the
timings as JSON next to this script (under ``results/``).
"""
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration


# Number of synthetic examples in the benchmark dataset.
SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


# NOTE: the original obfuscated source defined all three functions under one
# colliding name and then called the builtins `map`/`filter` with dataset
# kwargs (a TypeError at runtime); distinct names restore runnable code.
@get_duration
def benchmark_map(dataset: datasets.Dataset, **kwargs):
    """Time a single `Dataset.map` call; the mapped dataset is discarded."""
    _ = dataset.map(**kwargs)


@get_duration
def benchmark_filter(dataset: datasets.Dataset, **kwargs):
    """Time a single `Dataset.filter` call; the filtered dataset is discarded."""
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """Run the full map/filter benchmark suite and dump timings to RESULTS_FILE_PATH."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )

        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        # NOTE(review): the timing-dict keys below were lost in the obfuscated
        # source; reconstructed descriptively from the calls — confirm against
        # any downstream consumer of the results JSON.
        times["map identity"] = benchmark_map(dataset)
        times["map identity batched"] = benchmark_map(dataset, batched=True)
        times["map no-op batched"] = benchmark_map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = benchmark_map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = benchmark_map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = benchmark_map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = benchmark_map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = benchmark_map(dataset, function=tokenize, batched=True)

        times["filter"] = benchmark_filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
654
0
"""Convert an OpenAI Whisper checkpoint to the Hugging Face Transformers format."""
import argparse
import hashlib
import io
import os
import urllib.request  # was `import urllib`: urllib.request is a submodule and must be imported explicitly
import warnings

import torch
from torch import nn
from tqdm import tqdm

from transformers import WhisperConfig, WhisperForConditionalGeneration


# Official OpenAI checkpoint URLs. The path segment before the filename is the
# expected SHA256 checksum of the file (verified by `_download`).
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}


def remove_ignore_keys_(state_dict):
    """Drop top-level keys with no HF equivalent from `state_dict`, in place.

    Missing keys are ignored (`pop` with default) so this is safe on any dict.
    """
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


# Substring replacements mapping OpenAI parameter names to HF Whisper names.
WHISPER_MAPPING = {
    "blocks": "layers",
    "mlp.0": "fc1",
    "mlp.2": "fc2",
    "mlp_ln": "final_layer_norm",
    ".attn.query": ".self_attn.q_proj",
    ".attn.key": ".self_attn.k_proj",
    ".attn.value": ".self_attn.v_proj",
    ".attn_ln": ".self_attn_layer_norm",
    ".attn.out": ".self_attn.out_proj",
    ".cross_attn.query": ".encoder_attn.q_proj",
    ".cross_attn.key": ".encoder_attn.k_proj",
    ".cross_attn.value": ".encoder_attn.v_proj",
    ".cross_attn_ln": ".encoder_attn_layer_norm",
    ".cross_attn.out": ".encoder_attn.out_proj",
    "decoder.ln.": "decoder.layer_norm.",
    "encoder.ln.": "encoder.layer_norm.",
    "token_embedding": "embed_tokens",
    "encoder.positional_embedding": "encoder.embed_positions.weight",
    "decoder.positional_embedding": "decoder.embed_positions.weight",
    "ln_post": "layer_norm",
}


def rename_keys(s_dict):
    """Rename every key of `s_dict` according to WHISPER_MAPPING, in place.

    Returns the same dict for convenience.
    """
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict


def make_linear_from_emb(emb):
    """Build a bias-free Linear layer that shares the embedding's weights (weight tying)."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def _download(url: str, root: str = os.path.join(os.path.expanduser("~"), ".cache", "whisper")) -> bytes:
    """Download `url` into `root` and return the file's raw bytes.

    The SHA256 checksum embedded in the URL path is verified both for a
    cached file and after a fresh download.

    Raises:
        RuntimeError: if the target path is not a regular file, or the
            downloaded bytes fail the checksum.
    """
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        # `hashlib.shaaaa` in the garbled source does not exist; sha256 matches
        # the 64-hex-digit checksums embedded in the _MODELS URLs.
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model."
        )

    return model_bytes


def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    """Load an OpenAI checkpoint (model name or local .pt path) and save it as a HF model.

    Args:
        checkpoint_path: either a key of `_MODELS` (the checkpoint is downloaded)
            or a local path to a ``.pt`` file.
        pytorch_dump_folder_path: output directory for `save_pretrained`.
    """
    if ".pt" not in checkpoint_path:
        # _download returns raw bytes; wrap in BytesIO so torch.load can parse them.
        model_bytes = _download(_MODELS[checkpoint_path])
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    # Keep a copy of the output-projection weights before keys get renamed.
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        # was `n_text_state` (the decoder hidden width) — the head count lives
        # under `n_text_head` in OpenAI's dims.
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()

    convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
647
"""Convert a timm DeiT checkpoint to the Hugging Face Transformers format."""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs for the straight one-to-one renames."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate HF q/k/v entries, in place."""
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the fused matrix is stacked [q; k; v] along dim 0, hidden_size rows each.
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move `dct[old]` to `dct[new]`, in place."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Fetch the standard COCO cats test image used for output verification."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm DeiT checkpoint into the HF structure and save it.

    The timm and HF logits are compared on a test image (atol=1e-3) before saving.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # e.g. "..._patch16_224" -> patch_size 16, image_size 224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # defaults already match the base variant
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys_list = create_rename_keys(config, base_model)
    for src, dest in rename_keys_list:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
654
0
"""WavLM model configuration."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    """Configuration class storing the hyper-parameters of a WavLM model.

    NOTE(review): the obfuscated source collapsed every `__init__` parameter
    to one duplicated placeholder name (invalid Python); the keyword names
    below are restored from the attribute names the body assigns — verify
    against the upstream `WavLMConfig` signature.
    """

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv_* lists describe the same stack of feature-extractor
        # layers and must therefore agree in length.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Overall downsampling factor of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
205
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of `PriorTransformer.forward`.

    Attributes:
        predicted_image_embedding: the predicted CLIP image embedding,
            shape matches the model's `clip_embed_dim` on the last axis.
    """

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    """Transformer that predicts a CLIP image embedding from a noised embedding,
    a timestep and (optionally) text-conditioned hidden states.

    NOTE(review): class/method identifiers were destroyed by obfuscation
    (both classes shared one name, all methods another); restored so that
    `forward`'s reference to `PriorTransformerOutput` resolves.
    """

    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        # Optional dims default to the transformer's own widths.
        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        # Additive causal mask: upper triangle (above the diagonal) is -10000.
        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        """Return all attention processors indexed by their module path."""
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s); a dict must provide one entry per attention layer."""
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """Reset every attention layer to the default `AttnProcessor`."""
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        """Predict the denoised CLIP image embedding.

        Returns a `PriorTransformerOutput` (or a 1-tuple when `return_dict=False`).
        """
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            # Boolean mask -> large-negative additive mask, padded for the extra tokens.
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            # The trailing "prd" token carries the prediction.
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        """Un-normalize latents using the learned CLIP statistics."""
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
654
0
"""Deprecated feature-extractor alias for YOLOS, kept for backward compatibility."""
import warnings

from ...utils import logging
from .image_processing_yolos import YolosImageProcessor


logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    """Thin deprecated wrapper around `YolosImageProcessor`.

    Instantiating it emits a deprecation warning and otherwise behaves
    exactly like the image processor.
    """

    def __init__(self, *args, **kwargs) -> None:
        # FutureWarning is the conventional category for scheduled-removal
        # deprecations; the obfuscated source had an undefined placeholder here.
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
41
# Tests for the in-graph TF GPT-2 tokenizer: parity with the Python tokenizer,
# tf.function compilation, SavedModel round-trip, config round-trip, max_length.
#
# NOTE(review): this file appears machine-obfuscated — every assignment targets
# `snake_case__` while later statements read names such as `self.tokenizer`,
# `tokenized`, `input_ids_dense`, `python_outputs`, `ModelToSave`,
# `TOKENIZER_CHECKPOINTS`, which are never bound. The code is documented as-is;
# it will not run until the original bindings are restored.
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpta import TFGPTaTokenizer


# NOTE(review): both constants are named `a__`, so the second assignment
# overwrites the first — presumably these were TOKENIZER_CHECKPOINTS and
# TINY_MODEL_CHECKPOINT before obfuscation. TODO confirm.
a__ = ["""gpt2"""]
a__ = """gpt2"""

if is_tf_available():

    class _lowerCAmelCase(tf.Module):
        """Wraps a tokenizer + LM head model into a single tf.Module whose
        ``serving`` signature takes raw strings, for SavedModel export tests."""

        def __init__(self: List[Any], UpperCamelCase__: int):
            """Build the model from the tokenizer's checkpoint config."""
            super().__init__()
            snake_case__ = tokenizer
            snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__)
            snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="""text"""),))
        def __magic_name__(self: Tuple, UpperCamelCase__: int):
            """Tokenize raw strings in-graph and return the LM logits."""
            snake_case__ = self.tokenizer(UpperCamelCase__)
            snake_case__ = tokenized["""input_ids"""].to_tensor()
            # Attention mask: nonzero token ids are real tokens.
            snake_case__ = tf.cast(input_ids_dense > 0, tf.intaa)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            snake_case__ = self.model(input_ids=UpperCamelCase__, attention_mask=UpperCamelCase__)["""logits"""]
            return outputs


@require_tf
@require_keras_nlp
class _lowerCAmelCase(unittest.TestCase):
    """Compares the in-graph TFGPTaTokenizer against the reference Python
    GPTaTokenizer on a set of multilingual / control-character inputs."""

    def __magic_name__(self: List[Any]):
        """Load both tokenizer flavours and prepare the test sentences."""
        super().setUp()
        snake_case__ = [GPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        snake_case__ = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        snake_case__ = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        snake_case__ = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def __magic_name__(self: Optional[int]):
        """Every output key of the TF tokenizer must match the Python tokenizer
        in both shape and values."""
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                snake_case__ = tokenizer([test_inputs], return_tensors="""tf""")
                snake_case__ = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    snake_case__ = python_outputs[key].numpy()
                    snake_case__ = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__, tf.intaa) == tf_outputs_values))

    @slow
    def __magic_name__(self: Optional[int]):
        """Graph-compiled (tf.function) tokenization must equal eager output."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.function(UpperCamelCase__)
            for test_inputs in self.test_sentences:
                snake_case__ = tf.constant(UpperCamelCase__)
                snake_case__ = compiled_tokenizer(UpperCamelCase__)
                snake_case__ = tf_tokenizer(UpperCamelCase__)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def __magic_name__(self: Optional[Any]):
        """SavedModel round-trip: output after save/load must equal the
        in-memory model's output."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = ModelToSave(tokenizer=UpperCamelCase__)
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = model.serving(UpperCamelCase__)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                snake_case__ = Path(UpperCamelCase__) / """saved.model"""
                tf.saved_model.save(UpperCamelCase__, UpperCamelCase__, signatures={"""serving_default""": model.serving})
                snake_case__ = tf.saved_model.load(UpperCamelCase__)
                snake_case__ = loaded_model.signatures["""serving_default"""](UpperCamelCase__)["""output_0"""]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def __magic_name__(self: Tuple):
        """Config round-trip: a tokenizer rebuilt via get_config/from_config
        must produce identical outputs."""
        for tf_tokenizer in self.tf_tokenizers:
            snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
            snake_case__ = tf_tokenizer(UpperCamelCase__)  # Build model with some sample inputs
            snake_case__ = tf_tokenizer.get_config()
            snake_case__ = TFGPTaTokenizer.from_config(UpperCamelCase__)
            snake_case__ = model_from_config(UpperCamelCase__)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def __magic_name__(self: Dict):
        """Truncation: the produced input_ids width must equal max_length."""
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            snake_case__ = 1_2_3_1_2_3
            for max_length in [3, 5, 1_0_2_4]:
                snake_case__ = tf.convert_to_tensor([self.test_sentences[0]])
                snake_case__ = tf_tokenizer(UpperCamelCase__, max_length=UpperCamelCase__)
                snake_case__ = out["""input_ids"""].numpy().shape[1]
                assert out_length == max_length
654
0
"""simple docstring""" _A : Any = 0 # The first color of the flag. _A : int = 1 # The second color of the flag. _A : int = 2 # The third color of the flag. _A : Dict = (red, white, blue) def __magic_name__ ( __snake_case : list ) -> Tuple: if not sequence: return [] if len(__snake_case ) == 1: return list(__snake_case ) lowercase : Tuple = 0 lowercase : Any = len(__snake_case ) - 1 lowercase : Union[str, Any] = 0 while mid <= high: if sequence[mid] == colors[0]: lowercase , lowercase : Union[str, Any] = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: lowercase , lowercase : Tuple = sequence[high], sequence[mid] high -= 1 else: lowercase : Tuple = f"""The elements inside the sequence must contains only {colors} values""" raise ValueError(__snake_case ) return sequence if __name__ == "__main__": import doctest doctest.testmod() _A : Tuple = input("""Enter numbers separated by commas:\n""").strip() _A : Tuple = [int(item.strip()) for item in user_input.split(""",""")] print(F"{dutch_national_flag_sort(unsorted)}")
361
# Unit tests for the diffusers IPNDMScheduler: config/save-load round-trips,
# step-output shape checks, and a deterministic full denoising loop.
#
# NOTE(review): this file appears machine-obfuscated — every assignment targets
# `snake_case__` while later statements read `kwargs`, `sample`, `residual`,
# `scheduler`, `config`, `output`, `new_output`, etc., which are never bound,
# and the base class name `lowercase_` is undefined. Documented as-is; it will
# not run until the original bindings are restored.
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class _lowerCAmelCase(lowercase_):
    """Scheduler test-suite specialisation for IPNDMScheduler."""

    # Scheduler classes under test / default forward kwargs (names clobbered
    # by obfuscation: both class attributes are `_lowercase`).
    _lowercase: int = (IPNDMScheduler,)
    _lowercase: int = (('''num_inference_steps''', 50),)

    def __magic_name__(self: Any, **UpperCamelCase__: Tuple):
        """Return the base scheduler config, updated with any overrides."""
        snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
        config.update(**UpperCamelCase__)
        return config

    def __magic_name__(self: int, UpperCamelCase__: Dict = 0, **UpperCamelCase__: int):
        """Save-config/reload round-trip: a reloaded scheduler must step to
        (numerically) identical outputs as the original."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""", UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residuals
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            # Step twice: IPNDM keeps internal state (past residuals), so the
            # second step exercises the multi-step path as well.
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__(self: List[Any]):
        """Intentionally skipped common test (no-op for this scheduler)."""
        pass

    def __magic_name__(self: Tuple, UpperCamelCase__: Union[str, Any] = 0, **UpperCamelCase__: Tuple):
        """Same round-trip check as above, but driven by forward kwargs."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""", UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__(self: Union[str, Any], **UpperCamelCase__: Dict):
        """Run a full deterministic denoising loop (two passes over the
        timesteps, matching IPNDM's two-phase stepping) and return the sample."""
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
        snake_case__ = scheduler_class(**UpperCamelCase__)
        snake_case__ = 1_0
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__, UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__).prev_sample
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__, UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__).prev_sample
        return sample

    def __magic_name__(self: Optional[int]):
        """Stepping at two consecutive timesteps must preserve sample shape."""
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""", UpperCamelCase__)
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            snake_case__ = self.dummy_sample
            snake_case__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCamelCase__, """set_timesteps"""):
                scheduler.set_timesteps(UpperCamelCase__)
            elif num_inference_steps is not None and not hasattr(UpperCamelCase__, """set_timesteps"""):
                snake_case__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.timesteps[5]
            snake_case__ = scheduler.timesteps[6]
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_a.shape)
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape, sample.shape)
            self.assertEqual(output_a.shape, output_a.shape)

    def __magic_name__(self: Union[str, Any]):
        """Round-trip check across different num_train_timesteps values."""
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__, time_step=UpperCamelCase__)

    def __magic_name__(self: Dict):
        """Round-trip check across different num_inference_steps values."""
        for t, num_inference_steps in zip([1, 5, 1_0], [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=UpperCamelCase__, time_step=UpperCamelCase__)

    def __magic_name__(self: List[str]):
        """Regression test: mean absolute value of the full-loop result must
        stay close to the recorded reference value."""
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
654
0
"""Fetch the currently displayed stock price from Yahoo Finance (India)."""
import requests

# Fix: the original imported from the nonexistent module `bsa`; the intended
# package is BeautifulSoup 4 (`bs4`), which the file already depends on.
from bsa import BeautifulSoup  # noqa: F401  (kept for byte-compat shims that alias bs4)
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Return the price string shown on the Yahoo Finance quote page.

    Args:
        symbol: ticker symbol, e.g. ``"AAPL"``.

    Returns:
        The text of the first ``<span>`` inside the quote-header ``<div>``
        (Yahoo renders the price there); scraping, so brittle against
        page-layout changes.
    """
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    # Fix: the function was named `A_` while this loop calls `stock_price`,
    # which raised NameError at runtime; the definition now matches the call.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
3
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Agent tool that produces a binary segmentation mask from an image + label
# using CLIPSeg.
#
# NOTE(review): this file appears machine-obfuscated — the base class name
# `lowercase_` is undefined, all class attributes share the name `_lowercase`,
# and method bodies assign to `snake_case__` while reading unbound names
# (`label`, `image`, `logits`, `outputs`, `array`). Documented as-is.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class _lowerCAmelCase(lowercase_):
    """PipelineTool wrapper around CLIPSeg image segmentation."""

    # Tool description shown to the agent (attribute names clobbered by
    # obfuscation — these were description / default_checkpoint / name /
    # model_class / inputs / outputs before. TODO confirm against the
    # transformers tools base class).
    _lowercase: Optional[Any] = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    _lowercase: Dict = '''CIDAS/clipseg-rd64-refined'''
    _lowercase: List[Any] = '''image_segmenter'''
    _lowercase: Tuple = CLIPSegForImageSegmentation
    _lowercase: str = ['''image''', '''text''']
    _lowercase: Dict = ['''image''']

    def __init__(self: Optional[int], *UpperCamelCase__: Optional[Any], **UpperCamelCase__: List[Any]):
        """Fail fast if the `vision` backend (PIL) is unavailable, then defer
        to the PipelineTool constructor."""
        requires_backends(self, ["""vision"""])
        super().__init__(*UpperCamelCase__, **UpperCamelCase__)

    def __magic_name__(self: str, UpperCamelCase__: "Image", UpperCamelCase__: str):
        """Encode one (image, label) pair into model-ready tensors."""
        return self.pre_processor(text=[label], images=[image], padding=UpperCamelCase__, return_tensors="""pt""")

    def __magic_name__(self: Any, UpperCamelCase__: Optional[Any]):
        """Run CLIPSeg without gradients and return the raw logits."""
        with torch.no_grad():
            snake_case__ = self.model(**UpperCamelCase__).logits
        return logits

    def __magic_name__(self: Optional[int], UpperCamelCase__: Union[str, Any]):
        """Binarize the logits (0/1) and convert to an 8-bit PIL image mask."""
        snake_case__ = outputs.cpu().detach().numpy()
        snake_case__ = 0
        snake_case__ = 1
        return Image.fromarray((array * 2_5_5).astype(np.uinta))
654
0
"""simple docstring""" from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class a__ : snake_case__ = PegasusConfig snake_case__ = {} snake_case__ = '''gelu''' def __init__( self : List[str] ,a__ : Dict ,a__ : Optional[int]=13 ,a__ : Any=7 ,a__ : Optional[Any]=True ,a__ : Optional[int]=False ,a__ : int=99 ,a__ : Dict=32 ,a__ : str=2 ,a__ : int=4 ,a__ : Tuple=37 ,a__ : Union[str, Any]=0.1 ,a__ : Any=0.1 ,a__ : str=40 ,a__ : Optional[int]=2 ,a__ : Optional[Any]=1 ,a__ : Dict=0 ,) -> Tuple: """simple docstring""" _lowerCAmelCase:Optional[Any] = parent _lowerCAmelCase:Optional[int] = batch_size _lowerCAmelCase:List[Any] = seq_length _lowerCAmelCase:str = is_training _lowerCAmelCase:str = use_labels _lowerCAmelCase:Optional[Any] = vocab_size _lowerCAmelCase:int = hidden_size _lowerCAmelCase:Dict = num_hidden_layers _lowerCAmelCase:Optional[Any] = num_attention_heads _lowerCAmelCase:Any = intermediate_size _lowerCAmelCase:Dict = hidden_dropout_prob _lowerCAmelCase:Any = attention_probs_dropout_prob _lowerCAmelCase:Any = max_position_embeddings _lowerCAmelCase:str = eos_token_id _lowerCAmelCase:Optional[int] = pad_token_id _lowerCAmelCase:str = bos_token_id def __UpperCamelCase ( self : Optional[Any]) -> Tuple: """simple docstring""" _lowerCAmelCase:Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size) _lowerCAmelCase:int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) ,1) 
_lowerCAmelCase:Any = tf.concat([input_ids, eos_tensor] ,axis=1) _lowerCAmelCase:str = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) _lowerCAmelCase:Optional[int] = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) _lowerCAmelCase:List[Any] = prepare_pegasus_inputs_dict(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__) return config, inputs_dict def __UpperCamelCase ( self : Any ,a__ : Tuple ,a__ : Union[str, Any]) -> str: """simple docstring""" _lowerCAmelCase:List[Any] = TFPegasusModel(config=UpperCamelCase__).get_decoder() _lowerCAmelCase:Optional[Any] = inputs_dict['''input_ids'''] _lowerCAmelCase:Optional[Any] = input_ids[:1, :] _lowerCAmelCase:List[Any] = inputs_dict['''attention_mask'''][:1, :] _lowerCAmelCase:Dict = inputs_dict['''head_mask'''] _lowerCAmelCase:str = 1 # first forward pass _lowerCAmelCase:Dict = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,head_mask=UpperCamelCase__ ,use_cache=UpperCamelCase__) _lowerCAmelCase , _lowerCAmelCase:Optional[int] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids _lowerCAmelCase:Tuple = ids_tensor((self.batch_size, 3) ,config.vocab_size) _lowerCAmelCase:List[str] = tf.cast(ids_tensor((self.batch_size, 3) ,2) ,tf.inta) # append to next input_ids and _lowerCAmelCase:Any = tf.concat([input_ids, next_tokens] ,axis=-1) _lowerCAmelCase:List[Any] = tf.concat([attention_mask, next_attn_mask] 
,axis=-1) _lowerCAmelCase:Dict = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__)[0] _lowerCAmelCase:Optional[int] = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,past_key_values=UpperCamelCase__)[0] self.parent.assertEqual(next_tokens.shape[1] ,output_from_past.shape[1]) # select random slice _lowerCAmelCase:Union[str, Any] = int(ids_tensor((1,) ,output_from_past.shape[-1])) _lowerCAmelCase:Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] _lowerCAmelCase:Tuple = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase__ ,UpperCamelCase__ ,rtol=1E-3) def UpperCAmelCase ( snake_case : str , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : str=None , snake_case : int=None , snake_case : int=None , snake_case : int=None , snake_case : Optional[int]=None , ): if attention_mask is None: _lowerCAmelCase:Optional[int] = tf.cast(tf.math.not_equal(snake_case , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _lowerCAmelCase:Any = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _lowerCAmelCase:Union[str, Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _lowerCAmelCase:Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _lowerCAmelCase:Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class a__ ( lowercase_ , lowercase_ , unittest.TestCase ): snake_case__ = 
(TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () snake_case__ = (TFPegasusForConditionalGeneration,) if is_tf_available() else () snake_case__ = ( { '''conversational''': TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) snake_case__ = True snake_case__ = False snake_case__ = False def __UpperCamelCase ( self : str) -> Dict: """simple docstring""" _lowerCAmelCase:Tuple = TFPegasusModelTester(self) _lowerCAmelCase:Tuple = ConfigTester(self ,config_class=UpperCamelCase__) def __UpperCamelCase ( self : List[Any]) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() def __UpperCamelCase ( self : Optional[int]) -> List[str]: """simple docstring""" _lowerCAmelCase:Any = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__) @require_sentencepiece @require_tokenizers @require_tf class a__ ( unittest.TestCase ): snake_case__ = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] snake_case__ = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers snake_case__ = '''google/pegasus-xsum''' @cached_property def __UpperCamelCase ( self : Dict) -> List[Any]: """simple docstring""" return AutoTokenizer.from_pretrained(self.model_name) @cached_property def __UpperCamelCase ( self : int) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase:Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def __UpperCamelCase ( self : Dict ,**a__ : List[Any]) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase:List[str] = self.translate_src_text(**UpperCamelCase__) assert self.expected_text == generated_words def __UpperCamelCase ( self : str ,**a__ : List[Any]) -> Optional[int]: """simple docstring""" _lowerCAmelCase:Optional[int] = self.tokenizer(self.src_text ,**UpperCamelCase__ ,padding=UpperCamelCase__ ,return_tensors='''tf''') _lowerCAmelCase:Any = self.model.generate( model_inputs.input_ids ,attention_mask=model_inputs.attention_mask ,num_beams=2 ,use_cache=UpperCamelCase__ ,) _lowerCAmelCase:str = self.tokenizer.batch_decode(generated_ids.numpy() ,skip_special_tokens=UpperCamelCase__) return generated_words @slow def __UpperCamelCase ( self : List[str]) -> Any: """simple docstring""" self._assert_generated_batch_equal_expected()
227
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ): '''simple docstring''' snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size snake_case__ = apply_ocr def __magic_name__ ( self : Optional[Any]): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCAmelCase ( lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = LayoutLMvaImageProcessingTester(self) @property def __magic_name__ ( self : Tuple): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__ ( self : List[Any]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""")) 
self.assertTrue(hasattr(UpperCamelCase__ , """size""")) self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr""")) def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8}) snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2}) def __magic_name__ ( self : List[str]): '''simple docstring''' pass def __magic_name__ ( self : List[str]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""") self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , UpperCamelCase__) self.assertIsInstance(encoding.boxes , UpperCamelCase__) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : List[Any]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__) for image in image_inputs: 
self.assertIsInstance(UpperCamelCase__ , np.ndarray) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""") snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""") snake_case__ = 
image_processing(UpperCamelCase__ , return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4)) self.assertEqual(len(encoding.words) , len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", 
"""Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 
1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 
5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCamelCase__) self.assertListEqual(encoding.boxes , UpperCamelCase__) # with apply_OCR = False snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__) snake_case__ = image_processing(UpperCamelCase__ , 
return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
654
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _A ( lowercase_ ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" lowercase = params lowercase = np.array(UpperCamelCase__ ) lowercase = np.array([len(UpperCamelCase__ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self , __lowerCAmelCase ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self ): """simple docstring""" return len(self.lengths ) def A__ ( self ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def A__ ( self ): """simple docstring""" lowercase = self.params.max_model_input_size lowercase = self.lengths > max_len logger.info(f'Splitting {sum(UpperCamelCase__ )} too long sequences.' 
) def divide_chunks(__lowerCAmelCase , __lowerCAmelCase ): return [l[i : i + n] for i in range(0 , len(UpperCamelCase__ ) , UpperCamelCase__ )] lowercase = [] lowercase = [] if self.params.mlm: lowercase , lowercase = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: lowercase , lowercase = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: lowercase = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: lowercase = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__ ) if sub_s[-1] != sep_id: lowercase = np.insert(UpperCamelCase__ , len(UpperCamelCase__ ) , UpperCamelCase__ ) assert len(UpperCamelCase__ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(UpperCamelCase__ ) new_tok_ids.extend(UpperCamelCase__ ) new_lengths.extend([len(UpperCamelCase__ ) for l in sub_seqs] ) lowercase = np.array(UpperCamelCase__ ) lowercase = np.array(UpperCamelCase__ ) def A__ ( self ): """simple docstring""" lowercase = len(self ) lowercase = self.lengths > 11 lowercase = self.token_ids[indices] lowercase = self.lengths[indices] lowercase = len(self ) logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' ) def A__ ( self ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: lowercase = self.params.special_tok_ids["""unk_token"""] lowercase = len(self ) lowercase = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) lowercase = (unk_occs / self.lengths) < 0.5 lowercase = self.token_ids[indices] lowercase = self.lengths[indices] lowercase = len(self ) logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' 
) def A__ ( self ): """simple docstring""" if not self.params.is_master: return logger.info(f'{len(self )} sequences' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def A__ ( self , __lowerCAmelCase ): """simple docstring""" lowercase = [t[0] for t in batch] lowercase = [t[1] for t in batch] assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ) # Max for paddings lowercase = max(UpperCamelCase__ ) # Pad token ids if self.params.mlm: lowercase = self.params.special_tok_ids["""pad_token"""] else: lowercase = self.params.special_tok_ids["""unk_token"""] lowercase = [list(t.astype(UpperCamelCase__ ) ) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__ )) for t in token_ids] assert len(tk_ ) == len(UpperCamelCase__ ) assert all(len(UpperCamelCase__ ) == max_seq_len_ for t in tk_ ) lowercase = torch.tensor(tk_ ) # (bs, max_seq_len_) lowercase = torch.tensor(UpperCamelCase__ ) # (bs) return tk_t, lg_t
359
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __init__( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = params snake_case__ = np.array(UpperCamelCase__) snake_case__ = np.array([len(UpperCamelCase__) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Dict , UpperCamelCase__ : Any): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Union[str, Any]): '''simple docstring''' return len(self.lengths) def __magic_name__ ( self : str): '''simple docstring''' assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.params.max_model_input_size snake_case__ = self.lengths > max_len logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''') def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple): return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)] snake_case__ = [] snake_case__ = [] if self.params.mlm: snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: snake_case__ = [] for sub_s in divide_chunks(seq_ , max_len - 2): if sub_s[0] != cls_id: snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__) if sub_s[-1] != 
sep_id: snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__) assert len(UpperCamelCase__) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(UpperCamelCase__) new_tok_ids.extend(UpperCamelCase__) new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs]) snake_case__ = np.array(UpperCamelCase__) snake_case__ = np.array(UpperCamelCase__) def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = len(self) snake_case__ = self.lengths > 1_1 snake_case__ = self.token_ids[indices] snake_case__ = self.lengths[indices] snake_case__ = len(self) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''') def __magic_name__ ( self : List[str]): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: snake_case__ = self.params.special_tok_ids["""unk_token"""] snake_case__ = len(self) snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) snake_case__ = (unk_occs / self.lengths) < 0.5 snake_case__ = self.token_ids[indices] snake_case__ = self.lengths[indices] snake_case__ = len(self) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''') def __magic_name__ ( self : Optional[Any]): '''simple docstring''' if not self.params.is_master: return logger.info(F'''{len(self)} sequences''') # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int]): '''simple docstring''' snake_case__ = [t[0] for t in batch] snake_case__ = [t[1] for t in batch] assert len(UpperCamelCase__) == 
len(UpperCamelCase__) # Max for paddings snake_case__ = max(UpperCamelCase__) # Pad token ids if self.params.mlm: snake_case__ = self.params.special_tok_ids["""pad_token"""] else: snake_case__ = self.params.special_tok_ids["""unk_token"""] snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids] assert len(tk_) == len(UpperCamelCase__) assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_) snake_case__ = torch.tensor(tk_) # (bs, max_seq_len_) snake_case__ = torch.tensor(UpperCamelCase__) # (bs) return tk_t, lg_t
654
0
'''simple docstring''' import os import tempfile import unittest from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter from transformers.testing_utils import slow from transformers.utils import cached_property @unittest.skipUnless(os.path.exists(lowercase_ ) , 'Tatoeba directory does not exist.' ) class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = tempfile.mkdtemp() return TatoebaConverter(save_dir=UpperCamelCase__ ) @slow def UpperCamelCase( self ) -> int: '''simple docstring''' self.resolver.convert_models(['heb-eng'] ) @slow def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ ,lowerCamelCase_ = self.resolver.write_model_card('opus-mt-he-en' , dry_run=UpperCamelCase__ ) assert mmeta["long_pair"] == "heb-eng"
42
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def _UpperCAmelCase ( a : str ): if "model" in orig_key: snake_case__ = orig_key.replace("""model.""" , """""" ) if "norm1" in orig_key: snake_case__ = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" ) if "norm2" in orig_key: snake_case__ = orig_key.replace("""norm2""" , """output.LayerNorm""" ) if "norm" in orig_key: snake_case__ = orig_key.replace("""norm""" , """LayerNorm""" ) if "transformer" in orig_key: snake_case__ = orig_key.split(""".""" )[0].split("""_""" )[-1] snake_case__ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' ) if "mha.attn" in orig_key: snake_case__ = orig_key.replace("""mha.attn""" , """attention.self""" ) if "mha" in orig_key: snake_case__ = orig_key.replace("""mha""" , """attention""" ) if "W_q" in orig_key: snake_case__ = orig_key.replace("""W_q""" , """self.query""" ) if "W_k" in orig_key: snake_case__ = orig_key.replace("""W_k""" , """self.key""" ) if "W_v" in orig_key: snake_case__ = orig_key.replace("""W_v""" , """self.value""" ) if "ff1" in orig_key: snake_case__ = orig_key.replace("""ff1""" , """intermediate.dense""" ) if "ff2" in orig_key: snake_case__ = orig_key.replace("""ff2""" , """output.dense""" ) if "ff" in orig_key: snake_case__ = orig_key.replace("""ff""" , """output.dense""" ) if "mlm_class" in orig_key: snake_case__ = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" ) if "mlm" in orig_key: snake_case__ = orig_key.replace("""mlm""" , """cls.predictions.transform""" ) if "cls" not in orig_key: snake_case__ = """yoso.""" + orig_key return orig_key def _UpperCAmelCase ( a : Tuple , a : Dict ): for key in orig_state_dict.copy().keys(): snake_case__ = orig_state_dict.pop(a ) if ("pooler" in key) or ("sen_class" in key): continue else: snake_case__ = val snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""] snake_case__ = torch.arange(a ).expand((1, -1) ) + 2 
return orig_state_dict def _UpperCAmelCase ( a : int , a : List[Any] , a : List[Any] ): snake_case__ = torch.load(a , map_location="""cpu""" )["""model_state_dict"""] snake_case__ = YosoConfig.from_json_file(a ) snake_case__ = YosoForMaskedLM(a ) snake_case__ = convert_checkpoint_helper(config.max_position_embeddings , a ) print(model.load_state_dict(a ) ) model.eval() model.save_pretrained(a ) print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for YOSO model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a__ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
654
0
from ....utils import logging a_ : str = logging.get_logger(__name__) class lowerCamelCase__ ( lowercase_): """simple docstring""" def __init__(self , __a , __a=None , __a=20_48 ): '''simple docstring''' lowerCamelCase = config.__dict__ lowerCamelCase = modal_hidden_size if num_labels: lowerCamelCase = num_labels
623
import os
from typing import Optional

import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE


class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read-only filesystem view over a single compressed file.

    The "archive" contains exactly one member: the decompressed file. Subclasses
    only set ``protocol`` / ``compression`` / ``extension``.

    BUG FIX vs. the scrambled original: all six classes in this module shared one
    placeholder name (so only the last survived), subclasses did not inherit from
    the base, and every fsspec hook method was named ``__magic_name__`` — the
    upstream names (``_strip_protocol``, ``_get_dirs``, ``cat``, ``_open``) are
    restored so fsspec can actually dispatch to them.
    """

    root_marker = ""
    # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    protocol: str = None
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        # Lazily build the one-entry directory cache for the single member file.
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        # There is only one file, so ``path`` is ignored and the whole content returned.
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            """Delegating proxy so ``close`` can be set on the returned object."""

            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
654
0
"""Fast (Rust-backed) tokenization class for CodeGen.

NOTE(review): the scrambled module defined four different constants all named
``UpperCamelCase_`` while the class body referenced ``VOCAB_FILES_NAMES`` etc.,
and every method shared the name ``lowerCamelCase_`` (later defs shadowing
earlier ones). Constant and method names are restored to what the code itself
and ``PreTrainedTokenizerFast`` reference — confirm against upstream
``tokenization_codegen_fast.py``.
"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class __SCREAMING_SNAKE_CASE(PreTrainedTokenizerFast):
    """Fast CodeGen tokenizer (byte-level BPE) with completion truncation support."""

    # Attribute names restored to the hooks PreTrainedTokenizerFast expects.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        if kwargs.pop("add_bos_token", False):
            # BUG FIX: the scrambled source assigned this to a throwaway local
            # while the message below referenced ``model_id`` -> NameError.
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )
        # Re-create the pre-tokenizer if its add_prefix_space flag disagrees with ours.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Decode ids to text, optionally truncating at the first regex match."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)
        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Cut a generated completion at the second print/def or a terminal pattern."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        # Keep only the first top-level print statement / function definition.
        prints = list(re.finditer("^print", completion, re.MULTILINE))
        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))
        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0
        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
92
def _UpperCAmelCase ( a : int ): if number < 0: raise ValueError("""number must not be negative""" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
654
0
"""Fast tests for the IF inpainting super-resolution pipeline.

NOTE(review): every method of the scrambled test class was named
``UpperCamelCase__`` (so only the last definition survived and no test ran);
method and class-attribute names are restored to the unittest / tester-mixin
conventions the imported mixins dispatch on — confirm against upstream diffusers.
"""
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class UpperCAmelCase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Pipeline fast tests driven by the two tester mixins."""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # torch.Generator(device=...) is unsupported on MPS; fall back to the global seed.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # Low-res input plus full-res original/mask for super-resolution inpainting.
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Loose tolerance: fp16 round-tripping loses precision.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
404
class _lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = size snake_case__ = [0] * size snake_case__ = [0] * size @staticmethod def __magic_name__ ( UpperCamelCase__ : int): '''simple docstring''' return index | (index + 1) @staticmethod def __magic_name__ ( UpperCamelCase__ : int): '''simple docstring''' return (index & (index + 1)) - 1 def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = value while index < self.size: snake_case__ = self.get_prev(UpperCamelCase__) + 1 if current_left_border == index: snake_case__ = value else: snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) snake_case__ = self.get_next(UpperCamelCase__) def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int): '''simple docstring''' right -= 1 # Because of right is exclusive snake_case__ = 0 while left <= right: snake_case__ = self.get_prev(UpperCamelCase__) if left <= current_left: snake_case__ = max(UpperCamelCase__ , self.tree[right]) snake_case__ = current_left else: snake_case__ = max(UpperCamelCase__ , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
654
0
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class lowerCamelCase__(PipelineTool):
    """CLIPSeg-based tool producing a binary segmentation mask for a text label.

    BUG FIX vs. the scrambled original: the six class attributes all shared one
    placeholder name and the three hook methods collided — restored to the
    ``PipelineTool`` interface (description/default_checkpoint/name/model_class/
    inputs/outputs, encode/forward/decode).
    """

    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        # PIL is required to build the output mask image.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        # NOTE(review): the scrambled source had a placeholder for the padding
        # argument; upstream uses padding="max_length" — confirm.
        return self.pre_processor(text=[label], images=[image], padding="max_length", return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        # Threshold logits at 0 into a binary mask.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
647
# TF Pegasus model/tokenizer tests.
#
# NOTE(review): this module was run through an identifier scrambler: helper
# methods are all named `__magic_name__` (later defs shadow earlier ones),
# locals are all `snake_case__`, parameters are duplicated `UpperCamelCase__`/`a`
# placeholders (duplicate parameter names are a SyntaxError), and the three test
# classes share one name. Code is kept byte-identical below; only comments and
# docstrings were added. Restoring the real names requires the upstream file.
from __future__ import annotations

import unittest

from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel


@require_tf
class _lowerCAmelCase:
    """Builds tiny Pegasus configs and dummy inputs for the TF model tests."""

    # NOTE(review): these three scrambled `_lowercase` assignments shadow each
    # other; presumably config_cls / config_updates / hidden_act.
    _lowercase : List[str] = PegasusConfig
    _lowercase : Union[str, Any] = {}
    _lowercase : Tuple = '''gelu'''

    # NOTE(review): duplicate `UpperCamelCase__` parameter names below are a
    # SyntaxError, and the body references the original parameter names
    # (parent, batch_size, ...) which no longer exist.
    def __init__(
        self : List[str],
        UpperCamelCase__ : Dict,
        UpperCamelCase__ : Optional[int]=1_3,
        UpperCamelCase__ : Any=7,
        UpperCamelCase__ : Optional[Any]=True,
        UpperCamelCase__ : Optional[int]=False,
        UpperCamelCase__ : int=9_9,
        UpperCamelCase__ : Dict=3_2,
        UpperCamelCase__ : str=2,
        UpperCamelCase__ : int=4,
        UpperCamelCase__ : Tuple=3_7,
        UpperCamelCase__ : Union[str, Any]=0.1,
        UpperCamelCase__ : Any=0.1,
        UpperCamelCase__ : str=4_0,
        UpperCamelCase__ : Optional[int]=2,
        UpperCamelCase__ : Optional[Any]=1,
        UpperCamelCase__ : Dict=0,
    ):
        """Store the test hyper-parameters on the tester instance."""
        # NOTE(review): every assignment rebinds the same scrambled local;
        # originally these set self.parent, self.batch_size, self.seq_length,
        # self.is_training, self.use_labels, self.vocab_size, self.hidden_size,
        # self.num_hidden_layers, self.num_attention_heads,
        # self.intermediate_size, self.hidden_dropout_prob,
        # self.attention_probs_dropout_prob, self.max_position_embeddings,
        # self.eos_token_id, self.pad_token_id, self.bos_token_id.
        snake_case__ = parent
        snake_case__ = batch_size
        snake_case__ = seq_length
        snake_case__ = is_training
        snake_case__ = use_labels
        snake_case__ = vocab_size
        snake_case__ = hidden_size
        snake_case__ = num_hidden_layers
        snake_case__ = num_attention_heads
        snake_case__ = intermediate_size
        snake_case__ = hidden_dropout_prob
        snake_case__ = attention_probs_dropout_prob
        snake_case__ = max_position_embeddings
        snake_case__ = eos_token_id
        snake_case__ = pad_token_id
        snake_case__ = bos_token_id

    def __magic_name__(self : Optional[Any]):
        """Prepare a tiny config plus a matching inputs dict (prepare_config_and_inputs)."""
        snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        # Force every sequence to end with the EOS token.
        snake_case__ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        snake_case__ = tf.concat([input_ids, eos_tensor], axis=1)
        snake_case__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        snake_case__ = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__)
        return config, inputs_dict

    def __magic_name__(self : Any, UpperCamelCase__ : Tuple, UpperCamelCase__ : Union[str, Any]):
        """Check that decoder past-key-values give the same logits as a full pass
        (check_decoder_model_past_large_inputs).
        NOTE(review): shadows the previous `__magic_name__` definition.
        """
        snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder()
        snake_case__ = inputs_dict["""input_ids"""]

        # Restrict to a single example to keep the comparison cheap.
        snake_case__ = input_ids[:1, :]
        snake_case__ = inputs_dict["""attention_mask"""][:1, :]
        snake_case__ = inputs_dict["""head_mask"""]
        snake_case__ = 1

        # first forward pass
        snake_case__ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__, use_cache=UpperCamelCase__)

        snake_case__ , snake_case__ = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        snake_case__ = ids_tensor((self.batch_size, 3), config.vocab_size)
        snake_case__ = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.inta)

        # append to next input_ids and
        snake_case__ = tf.concat([input_ids, next_tokens], axis=-1)
        snake_case__ = tf.concat([attention_mask, next_attn_mask], axis=-1)

        snake_case__ = model(UpperCamelCase__, attention_mask=UpperCamelCase__)[0]
        snake_case__ = model(UpperCamelCase__, attention_mask=UpperCamelCase__, past_key_values=UpperCamelCase__)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        snake_case__ = int(ids_tensor((1,), output_from_past.shape[-1]))
        snake_case__ = output_from_no_past[:, -3:, random_slice_idx]
        snake_case__ = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCamelCase__, UpperCamelCase__, rtol=1E-3)


# NOTE(review): duplicate parameter names `a` below are a SyntaxError; the body
# references the original names (config, input_ids, decoder_input_ids, ...).
def _UpperCAmelCase(
    a : str,
    a : Union[str, Any],
    a : List[str],
    a : str=None,
    a : int=None,
    a : int=None,
    a : int=None,
    a : Optional[int]=None,
):
    """Build the full inputs dict for a Pegasus forward pass, defaulting any
    missing masks (prepare_pegasus_inputs_dict)."""
    if attention_mask is None:
        # Mask out padding tokens.
        snake_case__ = tf.cast(tf.math.not_equal(a, config.pad_token_id), tf.inta)
    if decoder_attention_mask is None:
        # First decoder position is always attended; rest mask padding.
        snake_case__ = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.inta),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.inta),
            ],
            axis=-1,
        )
    if head_mask is None:
        snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class _lowerCAmelCase(lowercase_, lowercase_, unittest.TestCase):
    """Common model tests for TF Pegasus.
    NOTE(review): shadows the tester class above (same scrambled name); the
    `lowercase_` bases are placeholders (presumably TFModelTesterMixin and
    PipelineTesterMixin — confirm).
    """

    # NOTE(review): the colliding `_lowercase` attrs were presumably
    # all_model_classes / all_generative_model_classes / pipeline_model_mapping /
    # is_encoder_decoder / test_pruning / test_onnx.
    _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    _lowercase : List[Any] = (
        {
            '''conversational''': TFPegasusForConditionalGeneration,
            '''feature-extraction''': TFPegasusModel,
            '''summarization''': TFPegasusForConditionalGeneration,
            '''text2text-generation''': TFPegasusForConditionalGeneration,
            '''translation''': TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    _lowercase : Optional[int] = True
    _lowercase : Dict = False
    _lowercase : Any = False

    def __magic_name__(self : str):
        """setUp: build the model tester and the config tester.
        NOTE(review): references `TFPegasusModelTester`, a name not defined in
        this scrambled module."""
        snake_case__ = TFPegasusModelTester(self)
        snake_case__ = ConfigTester(self, config_class=UpperCamelCase__)

    def __magic_name__(self : List[Any]):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def __magic_name__(self : Optional[int]):
        """Exercise the past-key-values decoder path."""
        snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__)


@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCAmelCase(unittest.TestCase):
    """Slow integration test: batch summarization with google/pegasus-xsum."""

    _lowercase : List[str] = [
        ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
        ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras.
 And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!"
 ''',
    ]
    _lowercase : str = [
        '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
        ''' reduce the risk of wildfires.''',
        '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    _lowercase : int = '''google/pegasus-xsum'''

    @cached_property
    def __magic_name__(self : Dict):
        """Tokenizer for the checkpoint under test."""
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def __magic_name__(self : int):
        """Pretrained TF seq2seq model (shadows the tokenizer property above)."""
        snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
        return model

    def __magic_name__(self : Dict, **UpperCamelCase__ : List[Any]):
        """Generate summaries and compare them to the expected targets.
        NOTE(review): references `self.translate_src_text` / `self.expected_text`,
        names lost to the scrambling."""
        snake_case__ = self.translate_src_text(**UpperCamelCase__)
        assert self.expected_text == generated_words

    def __magic_name__(self : str, **UpperCamelCase__ : List[Any]):
        """Tokenize the source batch, run beam-search generation and decode."""
        snake_case__ = self.tokenizer(self.src_text, **UpperCamelCase__, padding=UpperCamelCase__, return_tensors="""tf""")
        snake_case__ = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=UpperCamelCase__,
        )
        snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=UpperCamelCase__)
        return generated_words

    @slow
    def __magic_name__(self : List[str]):
        """Slow end-to-end batch generation test."""
        self._assert_generated_batch_equal_expected()
654
0
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor


SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)


class a_(OwlViTImageProcessor):
    """Deprecated feature-extractor shim that forwards to OwlViTImageProcessor.

    BUG FIX vs. the scrambled original: ``*args`` and ``**kwargs`` shared one
    placeholder name (a SyntaxError) and the warning category was a bare
    placeholder — restored to ``FutureWarning``, the category used for
    deprecations; the base class is the imported ``OwlViTImageProcessor``.
    """

    def __init__(self, *args, **kwargs):
        # Emit the standard deprecation warning, then behave exactly like the
        # image processor.
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
205
"""Jukebox-style tokenizer: maps (artist, genre, lyrics) triples to id streams."""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import regex

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy


# NOTE(review): this module appears machine-renamed — `a__` is rebound four times
# (logger, vocab-file names, pretrained map, lyric-token sizes), every method is
# named `__magic_name__`, and each signature repeats one parameter name (a
# SyntaxError as written) while bodies read the real names. Comments below
# describe *apparent* intent; confirm against the upstream Jukebox tokenizer.
a__ = logging.get_logger(__name__)

# Vocabulary file names: one JSON mapping per token family.
a__ = {
    """artists_file""": """artists.json""",
    """lyrics_file""": """lyrics.json""",
    """genres_file""": """genres.json""",
}

# Download locations of the pretrained vocabularies for the "jukebox" checkpoint.
a__ = {
    """artists_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json""",
    },
    """genres_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json""",
    },
    """lyrics_file""": {
        """jukebox""": """https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json""",
    },
}

# Maximum number of lyric tokens per checkpoint.
a__ = {
    """jukebox""": 5_1_2,
}


class _lowerCAmelCase(lowercase_):
    """Tokenizer built from three JSON vocabularies (artists, genres, lyric chars).

    The base class `lowercase_` is presumably `PreTrainedTokenizer` — TODO confirm.
    """

    # Class-level configuration consumed by the tokenizer machinery.
    # NOTE(review): VOCAB_FILES_NAMES / PRETRAINED_* are not bound in this chunk;
    # the `a__` constants above appear to be their renamed definitions.
    _lowercase : str = VOCAB_FILES_NAMES
    _lowercase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    _lowercase : str = PRETRAINED_LYRIC_TOKENS_SIZES
    _lowercase : Any = ['''input_ids''', '''attention_mask''']

    def __init__(
        self : int ,
        UpperCamelCase__ : Optional[Any] ,  # artists_file path — TODO confirm
        UpperCamelCase__ : int ,            # genres_file path — TODO confirm
        UpperCamelCase__ : Optional[int] ,  # lyrics_file path — TODO confirm
        UpperCamelCase__ : int=["v3", "v2", "v2"] ,  # per-prior versions; NOTE mutable default shared across calls
        UpperCamelCase__ : List[str]=5_1_2 ,         # max_n_lyric_tokens
        UpperCamelCase__ : Union[str, Any]=5 ,       # n_genres
        UpperCamelCase__ : List[Any]="<|endoftext|>" ,  # unk_token
        **UpperCamelCase__ : List[Any] ,
    ):
        """Load the three vocabularies and build forward/inverse lookup tables."""
        # Wrap the unk token as an AddedToken when given as a plain string.
        snake_case__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__) if isinstance(UpperCamelCase__ , UpperCamelCase__) else unk_token
        super().__init__(
            unk_token=UpperCamelCase__ ,
            n_genres=UpperCamelCase__ ,
            version=UpperCamelCase__ ,
            max_n_lyric_tokens=UpperCamelCase__ ,
            **UpperCamelCase__ ,
        )
        snake_case__ = version
        snake_case__ = max_n_lyric_tokens
        snake_case__ = n_genres
        # Load the artists / genres / lyrics encoders from their JSON files.
        # NOTE(review): the attribute-assignment targets were lost in renaming;
        # later code reads self.artists_encoder / self.genres_encoder / self.lyrics_encoder.
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        with open(UpperCamelCase__ , encoding="""utf-8""") as vocab_handle:
            snake_case__ = json.load(UpperCamelCase__)
        # Regex matching out-of-vocabulary characters in lyrics.
        snake_case__ = R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"""
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 7_9:
            snake_case__ = oov.replace(R"""\-'""" , R"""\-+'""")
        snake_case__ = regex.compile(UpperCamelCase__)
        # Inverse tables for decoding ids back to tokens.
        snake_case__ = {v: k for k, v in self.artists_encoder.items()}
        snake_case__ = {v: k for k, v in self.genres_encoder.items()}
        snake_case__ = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def __magic_name__(self : List[str]):
        """Total vocabulary size across the three encoders (vocab_size)."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def __magic_name__(self : Union[str, Any]):
        """Return the combined vocabulary (get_vocab).

        NOTE(review): `dict()` with three positional mappings raises TypeError at
        runtime — upstream presumably builds a dict holding the three encoders.
        """
        return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder)

    def __magic_name__(self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int):
        """Convert artist/genre/lyric tokens to ids (_convert_token_to_id).

        Unknown tokens map to id 0; each genre list is padded with -1 to n_genres.
        """
        snake_case__ = [self.artists_encoder.get(UpperCamelCase__ , 0) for artist in list_artists]
        for genres in range(len(UpperCamelCase__)):
            snake_case__ = [self.genres_encoder.get(UpperCamelCase__ , 0) for genre in list_genres[genres]]
            snake_case__ = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # Only the first prior conditions on characters; the other two get empty lists.
        snake_case__ = [[self.lyrics_encoder.get(UpperCamelCase__ , 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def __magic_name__(self : Optional[int] , UpperCamelCase__ : Optional[int]):
        """Split lyrics into single characters (_tokenize)."""
        return list(UpperCamelCase__)

    def __magic_name__(self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , **UpperCamelCase__ : List[str]):
        """Normalize then tokenize an (artist, genre, lyrics) triple (tokenize)."""
        snake_case__ , snake_case__ , snake_case__ = self.prepare_for_tokenization(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ = self._tokenize(UpperCamelCase__)
        return artist, genre, lyrics

    def __magic_name__(self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False):
        """Normalize artist/genre names per model version and strip OOV lyric chars."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                # v3 keeps raw names, lowercased.
                snake_case__ = artists[idx].lower()
                snake_case__ = [genres[idx].lower()]
            else:
                # v2 normalizes names and tags them with a ".v2" suffix.
                snake_case__ = self._normalize(artists[idx]) + """.v2"""
                snake_case__ = [
                    self._normalize(UpperCamelCase__) + """.v2""" for genre in genres[idx].split("""_""")
                ]  # split is for the full dictionary with combined genres
        if self.version[0] == "v2":
            # v2 uses a fixed 79/80-character vocabulary built inline here.
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+""")
            snake_case__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"""
            snake_case__ = {vocab[index]: index + 1 for index in range(len(UpperCamelCase__))}
            snake_case__ = 0
            snake_case__ = len(UpperCamelCase__) + 1
            snake_case__ = self.vocab
            snake_case__ = {v: k for k, v in self.vocab.items()}
            snake_case__ = """"""
        else:
            snake_case__ = regex.compile(R"""[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+""")
        snake_case__ = self._run_strip_accents(UpperCamelCase__)
        snake_case__ = lyrics.replace("""\\""" , """\n""")
        # Drop out-of-vocabulary characters; the two empty lists mirror the priors
        # that do not condition on lyrics.
        snake_case__ = self.out_of_vocab.sub("""""" , UpperCamelCase__), [], []
        return artists, genres, lyrics

    def __magic_name__(self : Union[str, Any] , UpperCamelCase__ : str):
        """Strip accents: NFD-normalize, then drop combining marks (category 'Mn')."""
        snake_case__ = unicodedata.normalize("""NFD""" , UpperCamelCase__)
        snake_case__ = []
        for char in text:
            snake_case__ = unicodedata.category(UpperCamelCase__)
            if cat == "Mn":
                continue
            output.append(UpperCamelCase__)
        return "".join(UpperCamelCase__)

    def __magic_name__(self : List[str] , UpperCamelCase__ : str):
        """Normalize a name: keep [a-zA-Z0-9.], replace the rest with collapsed '_'."""
        snake_case__ = (
            [chr(UpperCamelCase__) for i in range(ord("""a""") , ord("""z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""A""") , ord("""Z""") + 1)]
            + [chr(UpperCamelCase__) for i in range(ord("""0""") , ord("""9""") + 1)]
            + ["""."""]
        )
        snake_case__ = frozenset(UpperCamelCase__)
        snake_case__ = re.compile(R"""_+""")
        snake_case__ = """""".join([c if c in accepted else """_""" for c in text.lower()])
        snake_case__ = pattern.sub("""_""" , UpperCamelCase__).strip("""_""")
        return text

    def __magic_name__(self : List[Any] , UpperCamelCase__ : List[str]):
        """Join lyric tokens back into a space-separated string."""
        return " ".join(UpperCamelCase__)

    def __magic_name__(self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : bool = False):
        """Convert `inputs` to the requested tensor framework (tf/pt/jax/np)."""
        if not isinstance(UpperCamelCase__ , UpperCamelCase__):
            snake_case__ = TensorType(UpperCamelCase__)
        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    """Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.""")
            import tensorflow as tf

            snake_case__ = tf.constant
            snake_case__ = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("""Unable to convert output to PyTorch tensors format, PyTorch is not installed.""")
            import torch

            snake_case__ = torch.tensor
            snake_case__ = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("""Unable to convert output to JAX tensors format, JAX is not installed.""")
            import jax.numpy as jnp  # noqa: F811

            snake_case__ = jnp.array
            snake_case__ = _is_jax
        else:
            snake_case__ = np.asarray
            snake_case__ = _is_numpy
        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                snake_case__ = [inputs]
            if not is_tensor(UpperCamelCase__):
                snake_case__ = as_tensor(UpperCamelCase__)
        except:  # noqa E722
            raise ValueError(
                """Unable to create tensor, you should probably activate truncation and/or padding """
                """with 'padding=True' 'truncation=True' to have batched tensors with the same length.""")
        return inputs

    def __call__(self : str , UpperCamelCase__ : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Any="" , UpperCamelCase__ : Dict="pt"):
        """Tokenize (artist, genres, lyrics) into one id sequence per prior version."""
        snake_case__ = [0, 0, 0]
        snake_case__ = [artist] * len(self.version)
        snake_case__ = [genres] * len(self.version)
        snake_case__ , snake_case__ , snake_case__ = self.tokenize(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        snake_case__ , snake_case__ , snake_case__ = self._convert_token_to_id(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
        # -inf attention mask over the lyric positions — TODO confirm semantics.
        snake_case__ = [-INFINITY] * len(full_tokens[-1])
        snake_case__ = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=UpperCamelCase__)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"""input_ids""": input_ids, """attention_masks""": attention_masks})

    def __magic_name__(self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None):
        """Write the three encoders as JSON files into `save_directory` (save_vocabulary)."""
        if not os.path.isdir(UpperCamelCase__):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""artists_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.artists_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""genres_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.genres_encoder , ensure_ascii=UpperCamelCase__))
        snake_case__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""lyrics_file"""])
        with open(UpperCamelCase__ , """w""" , encoding="""utf-8""") as f:
            f.write(json.dumps(self.lyrics_encoder , ensure_ascii=UpperCamelCase__))
        return (artists_file, genres_file, lyrics_file)

    def __magic_name__(self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]):
        """Decode (artist_id, genre_ids, lyric_ids) back to tokens (_convert_id_to_token)."""
        snake_case__ = self.artists_decoder.get(UpperCamelCase__)
        snake_case__ = [self.genres_decoder.get(UpperCamelCase__) for genre in genres_index]
        snake_case__ = [self.lyrics_decoder.get(UpperCamelCase__) for character in lyric_index]
        return artist, genres, lyrics
654
0
"""A treap (randomized balanced BST) with split/merge primitives and a tiny REPL.

Restored from machine-mangled source: every function had been renamed `_A` with
duplicate `A__` parameters (a SyntaxError) while the bodies still referenced
`split`, `merge`, `insert`, `erase`, `inorder`, `interact_treap`, `main` and
`Node` — those internal references ground the names used here.
"""
from __future__ import annotations

from random import random


class Node:
    """A treap node: BST-ordered by `value`, heap-ordered by the random `prior`."""

    def __init__(self, value: int | None = None):
        self.value = value
        # Random heap priority keeps the tree balanced in expectation.
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (nodes <= value, nodes > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Root (and its right subtree) belongs to the right part.
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Root (and its left subtree) belongs to the left part.
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` precedes every value in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` into the treap, returning the new root."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove every node equal to `value`, returning the new root."""
    # Isolate the slice of nodes equal to `value` and drop it.
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order, comma separated."""
    if not root:  # None
        return
    inorder(root.left)
    print(root.value, end=",")
    inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a whitespace-separated command string: '+N' inserts N, '-N' erases N."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive loop: read command lines until 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("good by!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
41
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


# NOTE(review): machine-renamed code — both classes below share the name
# `_lowerCAmelCase` (the second shadows the first at module level), several defs
# repeat one parameter name (a SyntaxError as written), and attribute-assignment
# targets were flattened to `snake_case__`. Comments describe apparent intent.
class _lowerCAmelCase:
    """Resize images so the short edge hits a sampled target, capped by max_size."""

    def __init__(self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]=sys.maxsize):
        snake_case__ = """bilinear"""  # interpolation mode for tensor inputs
        snake_case__ = max_size        # upper bound on the longer edge
        snake_case__ = short_edge_length  # (lo, hi) range to sample the short edge from

    def __call__(self : List[str] , UpperCamelCase__ : Tuple):
        """Resize each image in `imgs`, preserving aspect ratio; returns the list."""
        snake_case__ = []
        for img in imgs:
            snake_case__ , snake_case__ = img.shape[:2]
            # later: provide list and randomly choose index for resize
            snake_case__ = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1)
            if size == 0:
                return img
            # Scale so the short edge equals `size`.
            snake_case__ = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__)
            if h < w:
                snake_case__ , snake_case__ = size, scale * w
            else:
                snake_case__ , snake_case__ = scale * h, size
            # Re-scale down if the long edge would exceed max_size.
            if max(UpperCamelCase__ , UpperCamelCase__) > self.max_size:
                snake_case__ = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__)
                snake_case__ = newh * scale
                snake_case__ = neww * scale
            snake_case__ = int(neww + 0.5)
            snake_case__ = int(newh + 0.5)
            # uint8 arrays go through PIL; float tensors through torch interpolate.
            if img.dtype == np.uinta:  # NOTE(review): presumably np.uint8 originally — confirm
                snake_case__ = Image.fromarray(UpperCamelCase__)
                snake_case__ = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR)
                snake_case__ = np.asarray(UpperCamelCase__)
            else:
                snake_case__ = img.permute(2 , 0 , 1).unsqueeze(0)  # 3, 0, 1)  # hw(c) -> nchw
                snake_case__ = nn.functional.interpolate(
                    UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__).squeeze(0)
            img_augs.append(UpperCamelCase__)
        return img_augs


class _lowerCAmelCase:
    """Full preprocessing pipeline: load, resize, normalize, pad, batch images."""

    def __init__(self : Dict , UpperCamelCase__ : Optional[int]):
        # Resize policy and constants derived from the detectron2-style config `cfg`.
        snake_case__ = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST)
        snake_case__ = cfg.INPUT.FORMAT
        snake_case__ = cfg.SIZE_DIVISIBILITY
        snake_case__ = cfg.PAD_VALUE
        snake_case__ = cfg.INPUT.MAX_SIZE_TEST
        snake_case__ = cfg.MODEL.DEVICE
        # Per-channel normalization stats, viewed (C, 1, 1) for broadcasting.
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD) , 1 , 1)
        snake_case__ = lambda UpperCamelCase__: (x - self.pixel_mean) / self.pixel_std

    def __magic_name__(self : Dict , UpperCamelCase__ : Dict):
        """Pad all images to a common (H, W) and stack; returns (batch, sizes)."""
        snake_case__ = tuple(max(UpperCamelCase__) for s in zip(*[img.shape for img in images]))
        snake_case__ = [im.shape[-2:] for im in images]
        snake_case__ = [
            nn.functional.pad(
                UpperCamelCase__ ,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] ,
                value=self.pad_value ,
            )
            for size, im in zip(UpperCamelCase__ , UpperCamelCase__)
        ]
        return torch.stack(UpperCamelCase__), torch.tensor(UpperCamelCase__)

    def __call__(self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : str=False):
        """Run the pipeline; with single_image=True returns unbatched results."""
        with torch.no_grad():
            if not isinstance(UpperCamelCase__ , UpperCamelCase__):
                snake_case__ = [images]
            if single_image:
                assert len(UpperCamelCase__) == 1
            for i in range(len(UpperCamelCase__)):
                # Tensors are moved to the device; anything else is loaded first.
                if isinstance(images[i] , torch.Tensor):
                    images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__).to(self.device).float())
                elif not isinstance(images[i] , torch.Tensor):
                    images.insert(
                        UpperCamelCase__ ,
                        torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__) , input_format=self.input_format))
                        .to(self.device)
                        .float() ,
                    )
            # resize smallest edge
            snake_case__ = torch.tensor([im.shape[:2] for im in images])
            snake_case__ = self.aug(UpperCamelCase__)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            snake_case__ = [self.normalizer(UpperCamelCase__) for x in images]
            # now pad them to do the following operations
            snake_case__ , snake_case__ = self.pad(UpperCamelCase__)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            snake_case__ = torch.true_divide(UpperCamelCase__ , UpperCamelCase__)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _UpperCAmelCase(a : Optional[Any] , a : Any):
    """Scale boxes in place: x-coords by scale_yx[:, 1], y-coords by scale_yx[:, 0]."""
    # NOTE(review): duplicate parameter name `a` — SyntaxError as written; bodies
    # reference `boxes` and `scale_yx`.
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _UpperCAmelCase(a : Any , a : Tuple[int, int]):
    """Clamp box coordinates in place to the image bounds; rejects non-finite boxes."""
    assert torch.isfinite(a).all(), "Box tensor contains infinite or NaN!"
    snake_case__ , snake_case__ = box_size
    tensor[:, 0].clamp_(min=0 , max=a)
    tensor[:, 1].clamp_(min=0 , max=a)
    tensor[:, 2].clamp_(min=0 , max=a)
    tensor[:, 3].clamp_(min=0 , max=a)
654
0
"""simple docstring""" import os def __magic_name__ ( __snake_case : str = "matrix.txt" ) -> int: with open(os.path.join(os.path.dirname(__snake_case ) , __snake_case ) ) as in_file: lowercase : Optional[Any] = in_file.read() lowercase : Optional[int] = [[int(__snake_case ) for cell in row.split("," )] for row in data.strip().splitlines()] lowercase : List[Any] = [[0 for cell in row] for row in grid] lowercase : Optional[Any] = len(grid[0] ) lowercase : Any = [[0 for i in range(__snake_case )] for j in range(__snake_case )] lowercase : str = grid[0][0] for i in range(1 , __snake_case ): lowercase : str = grid[0][i] + dp[0][i - 1] for i in range(1 , __snake_case ): lowercase : List[Any] = grid[i][0] + dp[i - 1][0] for i in range(1 , __snake_case ): for j in range(1 , __snake_case ): lowercase : int = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] ) return dp[-1][-1] if __name__ == "__main__": print(F"{solution() = }")
361
"""Configuration class for WavLM-style speech models."""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


a__ = logging.get_logger(__name__)

# NOTE(review): machine-renamed module — every __init__ parameter below shares
# the name `UpperCamelCase__` (a SyntaxError as written) while the body reads the
# real names (hidden_size, conv_dim, ...). Parameter meanings noted inline are
# inferred from the body's assignment order — confirm against upstream WavLMConfig.
a__ = {
    """microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class _lowerCAmelCase(lowercase_):
    """Model configuration: conv feature extractor, transformer encoder, and task heads."""

    _lowercase : Dict = '''wavlm'''  # model_type identifier

    def __init__(
        self : Tuple ,
        UpperCamelCase__ : str=3_2 ,            # vocab_size — TODO confirm
        UpperCamelCase__ : Any=7_6_8 ,          # hidden_size
        UpperCamelCase__ : Any=1_2 ,            # num_hidden_layers
        UpperCamelCase__ : Tuple=1_2 ,          # num_attention_heads
        UpperCamelCase__ : str=3_0_7_2 ,        # intermediate_size
        UpperCamelCase__ : Optional[Any]="gelu" ,   # hidden_act
        UpperCamelCase__ : Tuple=0.1 ,          # hidden_dropout
        UpperCamelCase__ : Union[str, Any]=0.1 ,    # activation/attention dropout — confirm order
        UpperCamelCase__ : List[Any]=0.1 ,
        UpperCamelCase__ : str=0.0 ,            # feat_proj_dropout
        UpperCamelCase__ : Optional[Any]=0.1 ,  # final_dropout
        UpperCamelCase__ : List[Any]=0.1 ,      # layerdrop
        UpperCamelCase__ : Optional[int]=0.02 , # initializer_range
        UpperCamelCase__ : Optional[int]=1E-5 , # layer_norm_eps
        UpperCamelCase__ : Any="group" ,        # feat_extract_norm
        UpperCamelCase__ : List[str]="gelu" ,   # feat_extract_activation
        UpperCamelCase__ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) ,  # conv_dim
        UpperCamelCase__ : List[str]=(5, 2, 2, 2, 2, 2, 2) ,      # conv_stride
        UpperCamelCase__ : Dict=(1_0, 3, 3, 3, 3, 2, 2) ,         # conv_kernel
        UpperCamelCase__ : int=False ,          # conv_bias
        UpperCamelCase__ : Optional[Any]=1_2_8 ,    # num_conv_pos_embeddings
        UpperCamelCase__ : Optional[int]=1_6 ,      # num_conv_pos_embedding_groups
        UpperCamelCase__ : Optional[Any]=3_2_0 ,    # num_buckets
        UpperCamelCase__ : Any=8_0_0 ,              # max_bucket_distance
        UpperCamelCase__ : Any=False ,              # do_stable_layer_norm
        UpperCamelCase__ : Optional[int]=True ,     # apply_spec_augment
        UpperCamelCase__ : Optional[Any]=0.05 ,     # mask_time_prob
        UpperCamelCase__ : Optional[Any]=1_0 ,      # mask_time_length
        UpperCamelCase__ : Union[str, Any]=2 ,      # mask_time_min_masks
        UpperCamelCase__ : Optional[Any]=0.0 ,      # mask_feature_prob
        UpperCamelCase__ : Tuple=1_0 ,              # mask_feature_length
        UpperCamelCase__ : Optional[int]=3_2_0 ,    # num_codevectors_per_group
        UpperCamelCase__ : Optional[Any]=2 ,        # num_codevector_groups
        UpperCamelCase__ : Any=0.1 ,                # contrastive_logits_temperature
        UpperCamelCase__ : Tuple=1_0_0 ,            # num_negatives
        UpperCamelCase__ : Dict=2_5_6 ,             # codevector_dim
        UpperCamelCase__ : Optional[int]=2_5_6 ,    # proj_codevector_dim
        UpperCamelCase__ : List[Any]=0.1 ,          # diversity_loss_weight
        UpperCamelCase__ : Tuple="mean" ,           # ctc_loss_reduction
        UpperCamelCase__ : List[Any]=False ,        # ctc_zero_infinity
        UpperCamelCase__ : Tuple=False ,            # use_weighted_layer_sum
        UpperCamelCase__ : Union[str, Any]=2_5_6 ,  # classifier_proj_size
        UpperCamelCase__ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) ,  # tdnn_dim
        UpperCamelCase__ : Optional[Any]=(5, 3, 3, 1, 1) ,              # tdnn_kernel
        UpperCamelCase__ : Any=(1, 2, 3, 1, 1) ,                        # tdnn_dilation
        UpperCamelCase__ : Dict=5_1_2 ,             # xvector_output_dim
        UpperCamelCase__ : str=8_0 ,                # num_ctc_classes
        UpperCamelCase__ : Optional[int]=0 ,        # pad_token_id
        UpperCamelCase__ : Any=1 ,                  # bos_token_id
        UpperCamelCase__ : Tuple=2 ,                # eos_token_id
        UpperCamelCase__ : str=False ,              # add_adapter
        UpperCamelCase__ : List[Any]=3 ,            # adapter_kernel_size
        UpperCamelCase__ : Optional[Any]=2 ,        # adapter_stride
        UpperCamelCase__ : List[Any]=3 ,            # num_adapter_layers
        UpperCamelCase__ : Optional[int]=None ,     # output_hidden_size
        **UpperCamelCase__ : List[str] ,
    ):
        """Store all hyper-parameters and validate the conv-layer configuration."""
        super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__)
        snake_case__ = hidden_size
        snake_case__ = feat_extract_norm
        snake_case__ = feat_extract_activation
        # Copy the conv tuples into lists so they are mutable/serializable.
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = conv_bias
        snake_case__ = num_buckets
        snake_case__ = max_bucket_distance
        snake_case__ = num_conv_pos_embeddings
        snake_case__ = num_conv_pos_embedding_groups
        snake_case__ = len(self.conv_dim)
        snake_case__ = num_hidden_layers
        snake_case__ = intermediate_size
        snake_case__ = hidden_act
        snake_case__ = num_attention_heads
        snake_case__ = hidden_dropout
        snake_case__ = attention_dropout
        snake_case__ = activation_dropout
        snake_case__ = feat_proj_dropout
        snake_case__ = final_dropout
        snake_case__ = layerdrop
        snake_case__ = layer_norm_eps
        snake_case__ = initializer_range
        snake_case__ = num_ctc_classes
        snake_case__ = vocab_size
        snake_case__ = do_stable_layer_norm
        snake_case__ = use_weighted_layer_sum
        snake_case__ = classifier_proj_size
        # The three conv lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F''' {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel)}`.''')
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case__ = apply_spec_augment
        snake_case__ = mask_time_prob
        snake_case__ = mask_time_length
        snake_case__ = mask_time_min_masks
        snake_case__ = mask_feature_prob
        snake_case__ = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        snake_case__ = num_codevectors_per_group
        snake_case__ = num_codevector_groups
        snake_case__ = contrastive_logits_temperature
        snake_case__ = num_negatives
        snake_case__ = codevector_dim
        snake_case__ = proj_codevector_dim
        snake_case__ = diversity_loss_weight
        # ctc loss
        snake_case__ = ctc_loss_reduction
        snake_case__ = ctc_zero_infinity
        # adapter
        snake_case__ = add_adapter
        snake_case__ = adapter_kernel_size
        snake_case__ = adapter_stride
        snake_case__ = num_adapter_layers
        snake_case__ = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        snake_case__ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = list(UpperCamelCase__)
        snake_case__ = xvector_output_dim

    @property
    def __magic_name__(self : Optional[int]):
        """Total downsampling factor of the conv feature extractor (product of strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1)
654
0
"""Tests for the MobileViT image processor (PIL / numpy / torch input paths)."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


# NOTE(review): machine-renamed test module — both classes share the name
# `SCREAMING_SNAKE_CASE__`, most methods share the name `UpperCAmelCase_`
# (later defs shadow earlier ones), the tester __init__ repeats the parameter
# name `A_` (a SyntaxError as written), and `MobileViTImageProcessingTester`
# referenced below is not bound under that name here. Comments describe intent.
class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Parameter holder used to build inputs/config for the processor tests."""

    def __init__(
        self ,
        A_ ,        # parent test case
        A_=7 ,      # batch_size
        A_=3 ,      # num_channels
        A_=18 ,     # image_size
        A_=30 ,     # min_resolution
        A_=400 ,    # max_resolution
        A_=True ,   # do_resize
        A_=None ,   # size
        A_=True ,   # do_center_crop
        A_=None ,   # crop_size
        A_=True ,   # do_flip_channel_order
    )-> List[Any]:
        # Defaults mirror the checkpoint configuration under test.
        UpperCamelCase = size if size is not None else {'shortest_edge': 20}
        UpperCamelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        UpperCamelCase = parent
        UpperCamelCase = batch_size
        UpperCamelCase = num_channels
        UpperCamelCase = image_size
        UpperCamelCase = min_resolution
        UpperCamelCase = max_resolution
        UpperCamelCase = do_resize
        UpperCamelCase = size
        UpperCamelCase = do_center_crop
        UpperCamelCase = crop_size
        UpperCamelCase = do_flip_channel_order

    def UpperCAmelCase_(self)-> List[Any]:
        """Return the kwargs dict used to construct the image processor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__(lowercase_ , unittest.TestCase):
    """Behavioral tests exercising MobileViTImageProcessor on all input types."""

    lowerCAmelCase_ = MobileViTImageProcessor if is_vision_available() else None

    def UpperCAmelCase_(self)-> Optional[Any]:
        # setUp: build the shared tester fixture.
        UpperCamelCase = MobileViTImageProcessingTester(self)

    @property
    def UpperCAmelCase_(self)-> Union[str, Any]:
        # image_processor_dict: processor kwargs from the tester.
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase_(self)-> Any:
        """The processor exposes all expected configuration attributes."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(UpperCamelCase__ , 'do_resize'))
        self.assertTrue(hasattr(UpperCamelCase__ , 'size'))
        self.assertTrue(hasattr(UpperCamelCase__ , 'do_center_crop'))
        self.assertTrue(hasattr(UpperCamelCase__ , 'center_crop'))
        self.assertTrue(hasattr(UpperCamelCase__ , 'do_flip_channel_order'))

    def UpperCAmelCase_(self)-> Dict:
        """from_dict honors defaults and kwarg overrides for size/crop_size."""
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18})
        UpperCamelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
        self.assertEqual(image_processor.size , {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84})

    def UpperCAmelCase_(self)-> str:
        # Intentionally empty placeholder (batch-feature test not applicable).
        pass

    def UpperCAmelCase_(self)-> Union[str, Any]:
        """PIL inputs: single image and batch produce correctly-shaped pixel_values."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image)
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )
        # Test batched
        UpperCamelCase = image_processing(UpperCamelCase__ , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )

    def UpperCAmelCase_(self)-> str:
        """Numpy inputs: single image and batch produce correctly-shaped pixel_values."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray)
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )
        # Test batched
        UpperCamelCase = image_processing(UpperCamelCase__ , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )

    def UpperCAmelCase_(self)-> List[Any]:
        """Torch inputs: single image and batch produce correctly-shaped pixel_values."""
        UpperCamelCase = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__)
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor)
        # Test not batched input
        UpperCamelCase = image_processing(image_inputs[0] , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )
        # Test batched
        UpperCamelCase = image_processing(UpperCamelCase__ , return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ) ,
        )
3
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with the score-based SDE-VE
    (variance-exploding) predictor/corrector sampler.

    Attributes:
        unet: denoising model that predicts the score of a noisy sample.
        scheduler: `ScoreSdeVeScheduler` driving the predictor/corrector loop.
    """

    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        # register_modules exposes the components as attributes and lets the
        # pipeline save/load machinery track them.
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Generate `batch_size` images.

        Args:
            batch_size: number of images to generate.
            num_inference_steps: number of denoising steps.
            generator: optional RNG(s) for deterministic sampling.
            output_type: "pil" for PIL images, anything else for a numpy array.
            return_dict: if False, return a plain `(images,)` tuple.

        Returns:
            `ImagePipelineOutput` (or a tuple when `return_dict` is False).
        """
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        # Start from pure noise scaled to the scheduler's initial sigma.
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # Corrector steps (Langevin-style updates at the current noise level).
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # Predictor step.
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # Use the noise-free mean of the final predictor step as the result.
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
654
0
"""simple docstring""" def UpperCAmelCase ( snake_case : str , snake_case : list[str] ): _lowerCAmelCase:Optional[Any] = '''''' for word_or_phrase in separated: if not isinstance(snake_case , snake_case ): raise Exception('''join() accepts only strings to be joined''' ) joined += word_or_phrase + separator return joined.strip(snake_case ) if __name__ == "__main__": from doctest import testmod testmod()
227
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for the IF inpainting super-resolution pipeline."""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    # width/height are fixed by the super-resolution input image.
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Shared tiny component set provided by IFPipelineTesterMixin.
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic dummy inputs for `device` seeded with `seed`."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        # Low-res conditioning image and the hi-res original/mask pair.
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Loose tolerance: fp16 save/load of the tiny models is noisy.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
654
0
"""simple docstring""" import re from filelock import FileLock try: import nltk __lowerCAmelCase : List[str] =True except (ImportError, ModuleNotFoundError): __lowerCAmelCase : int =False if NLTK_AVAILABLE: with FileLock(""".lock""") as lock: nltk.download("""punkt""", quiet=True) def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Any: '''simple docstring''' re.sub("""<n>""" , """""" , lowerCAmelCase__ ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(lowerCAmelCase__ ) )
359
a__ = [0, 2, 4, 6, 8] a__ = [1, 3, 5, 7, 9] def _UpperCAmelCase ( a : int , a : int , a : list[int] , a : int ): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 snake_case__ = 0 for digit in range(10 ): snake_case__ = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , a , a ) return result snake_case__ = 0 for digita in range(10 ): snake_case__ = digita if (remainder + digita) % 2 == 0: snake_case__ = ODD_DIGITS else: snake_case__ = EVEN_DIGITS for digita in other_parity_digits: snake_case__ = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , a , a , ) return result def _UpperCAmelCase ( a : int = 9 ): snake_case__ = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(a , 0 , [0] * length , a ) return result if __name__ == "__main__": print(F'''{solution() = }''')
654
0
"""Fast Flax tests for BigBird models."""
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds tiny BigBird configs and dummy inputs for the fast tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        # Store every knob on self so the common test mixin can read them.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the layout the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    # NOTE(review): the obfuscated source only shows two boolean flags set to
    # False; restored as the flags the common mixin consumes — confirm.
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # Block-sparse attention probabilities are not comparable to the dense
        # layout, so the common check only runs when explicitly enabled.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # Attention probabilities differ between PT and Flax BigBird by design;
        # skip that comparison and defer everything else to the mixin.
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
42
# Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer from .base import PipelineTool a__ = { """Acehnese Arabic""": """ace_Arab""", """Acehnese Latin""": """ace_Latn""", """Mesopotamian Arabic""": """acm_Arab""", """Ta'izzi-Adeni Arabic""": """acq_Arab""", """Tunisian Arabic""": """aeb_Arab""", """Afrikaans""": """afr_Latn""", """South Levantine Arabic""": """ajp_Arab""", """Akan""": """aka_Latn""", """Amharic""": """amh_Ethi""", """North Levantine Arabic""": """apc_Arab""", """Modern Standard Arabic""": """arb_Arab""", """Modern Standard Arabic Romanized""": """arb_Latn""", """Najdi Arabic""": """ars_Arab""", """Moroccan Arabic""": """ary_Arab""", """Egyptian Arabic""": """arz_Arab""", """Assamese""": """asm_Beng""", """Asturian""": """ast_Latn""", """Awadhi""": """awa_Deva""", """Central Aymara""": """ayr_Latn""", """South Azerbaijani""": """azb_Arab""", """North Azerbaijani""": """azj_Latn""", """Bashkir""": """bak_Cyrl""", """Bambara""": """bam_Latn""", """Balinese""": """ban_Latn""", """Belarusian""": """bel_Cyrl""", """Bemba""": """bem_Latn""", """Bengali""": """ben_Beng""", """Bhojpuri""": """bho_Deva""", """Banjar Arabic""": """bjn_Arab""", """Banjar Latin""": """bjn_Latn""", """Standard Tibetan""": """bod_Tibt""", """Bosnian""": """bos_Latn""", """Buginese""": """bug_Latn""", """Bulgarian""": """bul_Cyrl""", """Catalan""": """cat_Latn""", 
"""Cebuano""": """ceb_Latn""", """Czech""": """ces_Latn""", """Chokwe""": """cjk_Latn""", """Central Kurdish""": """ckb_Arab""", """Crimean Tatar""": """crh_Latn""", """Welsh""": """cym_Latn""", """Danish""": """dan_Latn""", """German""": """deu_Latn""", """Southwestern Dinka""": """dik_Latn""", """Dyula""": """dyu_Latn""", """Dzongkha""": """dzo_Tibt""", """Greek""": """ell_Grek""", """English""": """eng_Latn""", """Esperanto""": """epo_Latn""", """Estonian""": """est_Latn""", """Basque""": """eus_Latn""", """Ewe""": """ewe_Latn""", """Faroese""": """fao_Latn""", """Fijian""": """fij_Latn""", """Finnish""": """fin_Latn""", """Fon""": """fon_Latn""", """French""": """fra_Latn""", """Friulian""": """fur_Latn""", """Nigerian Fulfulde""": """fuv_Latn""", """Scottish Gaelic""": """gla_Latn""", """Irish""": """gle_Latn""", """Galician""": """glg_Latn""", """Guarani""": """grn_Latn""", """Gujarati""": """guj_Gujr""", """Haitian Creole""": """hat_Latn""", """Hausa""": """hau_Latn""", """Hebrew""": """heb_Hebr""", """Hindi""": """hin_Deva""", """Chhattisgarhi""": """hne_Deva""", """Croatian""": """hrv_Latn""", """Hungarian""": """hun_Latn""", """Armenian""": """hye_Armn""", """Igbo""": """ibo_Latn""", """Ilocano""": """ilo_Latn""", """Indonesian""": """ind_Latn""", """Icelandic""": """isl_Latn""", """Italian""": """ita_Latn""", """Javanese""": """jav_Latn""", """Japanese""": """jpn_Jpan""", """Kabyle""": """kab_Latn""", """Jingpho""": """kac_Latn""", """Kamba""": """kam_Latn""", """Kannada""": """kan_Knda""", """Kashmiri Arabic""": """kas_Arab""", """Kashmiri Devanagari""": """kas_Deva""", """Georgian""": """kat_Geor""", """Central Kanuri Arabic""": """knc_Arab""", """Central Kanuri Latin""": """knc_Latn""", """Kazakh""": """kaz_Cyrl""", """Kabiyè""": """kbp_Latn""", """Kabuverdianu""": """kea_Latn""", """Khmer""": """khm_Khmr""", """Kikuyu""": """kik_Latn""", """Kinyarwanda""": """kin_Latn""", """Kyrgyz""": """kir_Cyrl""", """Kimbundu""": """kmb_Latn""", """Northern 
Kurdish""": """kmr_Latn""", """Kikongo""": """kon_Latn""", """Korean""": """kor_Hang""", """Lao""": """lao_Laoo""", """Ligurian""": """lij_Latn""", """Limburgish""": """lim_Latn""", """Lingala""": """lin_Latn""", """Lithuanian""": """lit_Latn""", """Lombard""": """lmo_Latn""", """Latgalian""": """ltg_Latn""", """Luxembourgish""": """ltz_Latn""", """Luba-Kasai""": """lua_Latn""", """Ganda""": """lug_Latn""", """Luo""": """luo_Latn""", """Mizo""": """lus_Latn""", """Standard Latvian""": """lvs_Latn""", """Magahi""": """mag_Deva""", """Maithili""": """mai_Deva""", """Malayalam""": """mal_Mlym""", """Marathi""": """mar_Deva""", """Minangkabau Arabic """: """min_Arab""", """Minangkabau Latin""": """min_Latn""", """Macedonian""": """mkd_Cyrl""", """Plateau Malagasy""": """plt_Latn""", """Maltese""": """mlt_Latn""", """Meitei Bengali""": """mni_Beng""", """Halh Mongolian""": """khk_Cyrl""", """Mossi""": """mos_Latn""", """Maori""": """mri_Latn""", """Burmese""": """mya_Mymr""", """Dutch""": """nld_Latn""", """Norwegian Nynorsk""": """nno_Latn""", """Norwegian Bokmål""": """nob_Latn""", """Nepali""": """npi_Deva""", """Northern Sotho""": """nso_Latn""", """Nuer""": """nus_Latn""", """Nyanja""": """nya_Latn""", """Occitan""": """oci_Latn""", """West Central Oromo""": """gaz_Latn""", """Odia""": """ory_Orya""", """Pangasinan""": """pag_Latn""", """Eastern Panjabi""": """pan_Guru""", """Papiamento""": """pap_Latn""", """Western Persian""": """pes_Arab""", """Polish""": """pol_Latn""", """Portuguese""": """por_Latn""", """Dari""": """prs_Arab""", """Southern Pashto""": """pbt_Arab""", """Ayacucho Quechua""": """quy_Latn""", """Romanian""": """ron_Latn""", """Rundi""": """run_Latn""", """Russian""": """rus_Cyrl""", """Sango""": """sag_Latn""", """Sanskrit""": """san_Deva""", """Santali""": """sat_Olck""", """Sicilian""": """scn_Latn""", """Shan""": """shn_Mymr""", """Sinhala""": """sin_Sinh""", """Slovak""": """slk_Latn""", """Slovenian""": """slv_Latn""", """Samoan""": 
"""smo_Latn""", """Shona""": """sna_Latn""", """Sindhi""": """snd_Arab""", """Somali""": """som_Latn""", """Southern Sotho""": """sot_Latn""", """Spanish""": """spa_Latn""", """Tosk Albanian""": """als_Latn""", """Sardinian""": """srd_Latn""", """Serbian""": """srp_Cyrl""", """Swati""": """ssw_Latn""", """Sundanese""": """sun_Latn""", """Swedish""": """swe_Latn""", """Swahili""": """swh_Latn""", """Silesian""": """szl_Latn""", """Tamil""": """tam_Taml""", """Tatar""": """tat_Cyrl""", """Telugu""": """tel_Telu""", """Tajik""": """tgk_Cyrl""", """Tagalog""": """tgl_Latn""", """Thai""": """tha_Thai""", """Tigrinya""": """tir_Ethi""", """Tamasheq Latin""": """taq_Latn""", """Tamasheq Tifinagh""": """taq_Tfng""", """Tok Pisin""": """tpi_Latn""", """Tswana""": """tsn_Latn""", """Tsonga""": """tso_Latn""", """Turkmen""": """tuk_Latn""", """Tumbuka""": """tum_Latn""", """Turkish""": """tur_Latn""", """Twi""": """twi_Latn""", """Central Atlas Tamazight""": """tzm_Tfng""", """Uyghur""": """uig_Arab""", """Ukrainian""": """ukr_Cyrl""", """Umbundu""": """umb_Latn""", """Urdu""": """urd_Arab""", """Northern Uzbek""": """uzn_Latn""", """Venetian""": """vec_Latn""", """Vietnamese""": """vie_Latn""", """Waray""": """war_Latn""", """Wolof""": """wol_Latn""", """Xhosa""": """xho_Latn""", """Eastern Yiddish""": """ydd_Hebr""", """Yoruba""": """yor_Latn""", """Yue Chinese""": """yue_Hant""", """Chinese Simplified""": """zho_Hans""", """Chinese Traditional""": """zho_Hant""", """Standard Malay""": """zsm_Latn""", """Zulu""": """zul_Latn""", } class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : List[str] = '''facebook/nllb-200-distilled-600M''' _lowercase : List[Any] = ( '''This is a tool that translates text from a language to another. It takes three inputs: `text`, which should ''' '''be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, ''' '''which should be the language for the desired ouput language. 
Both `src_lang` and `tgt_lang` are written in ''' '''plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.''' ) _lowercase : Optional[int] = '''translator''' _lowercase : Optional[Any] = AutoTokenizer _lowercase : Dict = AutoModelForSeqaSeqLM _lowercase : List[str] = LANGUAGE_CODES _lowercase : Optional[Any] = ['''text''', '''text''', '''text'''] _lowercase : Tuple = ['''text'''] def __magic_name__ ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int): '''simple docstring''' if src_lang not in self.lang_to_code: raise ValueError(F'''{src_lang} is not a supported language.''') if tgt_lang not in self.lang_to_code: raise ValueError(F'''{tgt_lang} is not a supported language.''') snake_case__ = self.lang_to_code[src_lang] snake_case__ = self.lang_to_code[tgt_lang] return self.pre_processor._build_translation_inputs( UpperCamelCase__ , return_tensors="""pt""" , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__) def __magic_name__ ( self : Dict , UpperCamelCase__ : Dict): '''simple docstring''' return self.model.generate(**UpperCamelCase__) def __magic_name__ ( self : List[str] , UpperCamelCase__ : Dict): '''simple docstring''' return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=UpperCamelCase__)
654
0
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class lowerCamelCase__ ( unittest.TestCase): """simple docstring""" def __init__(self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=True , __a=None , __a=True , ): '''simple docstring''' lowerCamelCase = size if size is not None else {"height": 18, "width": 18} lowerCamelCase = parent lowerCamelCase = batch_size lowerCamelCase = num_channels lowerCamelCase = image_size lowerCamelCase = min_resolution lowerCamelCase = max_resolution lowerCamelCase = do_resize lowerCamelCase = size lowerCamelCase = apply_ocr def _a (self ): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class lowerCamelCase__ ( lowercase_ , unittest.TestCase): """simple docstring""" _A = LayoutLMvaImageProcessor if is_pytesseract_available() else None def _a (self ): '''simple docstring''' lowerCamelCase = LayoutLMvaImageProcessingTester(self ) @property def _a (self ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "size" ) ) self.assertTrue(hasattr(UpperCamelCase__ , "apply_ocr" ) ) def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"height": 18, "width": 18} ) lowerCamelCase = 
self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {"height": 42, "width": 42} ) def _a (self ): '''simple docstring''' pass def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input lowerCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ) self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) self.assertIsInstance(encoding.words , UpperCamelCase__ ) self.assertIsInstance(encoding.boxes , UpperCamelCase__ ) # Test batched lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input lowerCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values 
self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _a (self ): '''simple docstring''' lowerCamelCase = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input lowerCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) # Test batched lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ) , ) def _a (self ): '''simple docstring''' lowerCamelCase = LayoutLMvaImageProcessor() from datasets import load_dataset lowerCamelCase = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" ) lowerCamelCase = Image.open(ds[0]["file"] ).convert("RGB" ) lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) ) self.assertEqual(len(encoding.words ) , len(encoding.boxes ) ) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 lowerCamelCase = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", 
"for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231 lowerCamelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 
2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 
4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCamelCase__ ) self.assertListEqual(encoding.boxes , UpperCamelCase__ ) # with apply_OCR = False lowerCamelCase = 
LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__ ) lowerCamelCase = image_processing(UpperCamelCase__ , return_tensors="pt" ) self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
623
import gc
import importlib.metadata
import tempfile
import unittest

from packaging import version

from transformers import (
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForSeq2SeqLM,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    BitsAndBytesConfig,
    pipeline,
)
from transformers.testing_utils import (
    is_torch_available,
    require_accelerate,
    require_bitsandbytes,
    require_torch,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
)


def get_some_linear_layer(model):
    """Return one representative linear layer of *model* (the kind bnb quantizes)."""
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_4h_to_h


if is_torch_available():
    import torch
    import torch.nn as nn

    class LoRALayer(nn.Module):
        """Wrap a frozen linear module with a small trainable low-rank adapter.

        Only the adapter weights receive gradients; the training test below uses
        this to verify that backprop works through 4-bit base weights.
        """

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False),
                nn.Linear(rank, module.out_features, bias=False),
            )
            # Small init on the down-projection, zeros on the up-projection so the
            # adapter starts as an identity perturbation.
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    """Shared fixtures/constants for the 4-bit quantization tests below."""

    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)


class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # Models and tokenizer
        self.model_fp16 = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.float16, device_map="auto"
        )
        self.model_4bit = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.model_fp16
        del self.model_4bit
        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        """A quantized model's config must carry and serialize its quantization_config."""
        config = self.model_4bit.config
        self.assertTrue(hasattr(config, "quantization_config"))
        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        """4-bit model should be ~EXPECTED_RELATIVE_DIFFERENCE x smaller than fp16."""
        from bitsandbytes.nn import Params4bit

        mem_fp16 = self.model_fp16.get_memory_footprint()
        mem_4bit = self.model_4bit.get_memory_footprint()
        self.assertAlmostEqual(mem_fp16 / mem_4bit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_4bit)
        self.assertTrue(linear.weight.__class__ == Params4bit)

    def test_linear_are_4bit(self):
        from transformers import T5PreTrainedModel

        self.model_fp16.get_memory_footprint()
        self.model_4bit.get_memory_footprint()
        for name, module in self.model_4bit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + T5PreTrainedModel._keep_in_fp32_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)

    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_4bit.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(
            self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS
        )

    def test_generate_quality_config(self):
        """Loading through an explicit BitsAndBytesConfig must behave identically."""
        config = BitsAndBytesConfig()
        config.load_in_4bit = True
        model_4bit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=config, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_4bit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(
            self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS
        )

    def test_raise_on_save_pretrained(self):
        # Serializing 4-bit weights is not supported.
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_4bit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        # Passing both a quantization_config and load_in_4bit is ambiguous -> error.
        config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=config,
                load_in_4bit=True,
                device_map="auto",
                bnb_4bit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        """Casting or moving a quantized model must raise; fp16 model stays usable."""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_4bit.to("cpu")
        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_4bit.to(torch.float16)
        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_4bit.to(torch.device("cuda:0"))
        with self.assertRaises(ValueError):
            self.model_4bit.float()
        with self.assertRaises(ValueError):
            self.model_4bit.half()
        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fp16 = self.model_fp16.to(torch.float32)
        _ = self.model_fp16.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        # Check this does not throw an error
        _ = self.model_fp16.to("cpu")
        # Check this does not throw an error
        _ = self.model_fp16.half()
        # Check this does not throw an error
        _ = self.model_fp16.float()

    def test_fp32_4bit_conversion(self):
        """Modules listed in _keep_in_fp32_modules must stay fp32 after quantization."""
        model = AutoModelForSeq2SeqLM.from_pretrained("t5-small", load_in_4bit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.float32)


@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        from transformers import T5ForConditionalGeneration

        modules = T5ForConditionalGeneration._keep_in_fp32_modules
        T5ForConditionalGeneration._keep_in_fp32_modules = None
        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # Restore the class attribute mutated above.
        T5ForConditionalGeneration._keep_in_fp32_modules = modules

    def test_inference_with_keep_in_fp32(self):
        import bitsandbytes as bnb
        from transformers import T5ForConditionalGeneration

        # test with `t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linear4bit))
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # test with `flan-t5-small`
        model = T5ForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_4bit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)


class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"
        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_4bit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # CausalLM model
        self.model_4bit = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="auto"
        )
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeq2SeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_4bit=True, device_map="auto"
        )

    def tearDown(self):
        del self.base_model
        del self.sequence_model
        del self.model_4bit
        del self.seq_to_seq_model
        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        from bitsandbytes.nn import Params4bit

        self.assertTrue(self.base_model.h[-1].mlp.dense_4h_to_h.weight.__class__ == Params4bit)
        # Other heads should be nn.Parameter
        self.assertTrue(self.model_4bit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)


class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

    def tearDown(self):
        del self.pipe
        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.float16},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )
        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)


@require_torch_multi_gpu
class Bnb4BitTestMultiGpu(Base4bitTest):
    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_4bit=True, device_map="balanced"
        )
        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})
        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        # Second real batch
        output_parallel = model_parallel.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )
        self.assertIn(
            self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS
        )


class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return
        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_4bit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})
        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.float32)
        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)
        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)
        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()
        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)


class Bnb4BitGPT2Test(Bnb4BitTest):
    # Re-run the full 4-bit suite against GPT-2 XL.
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
654
0
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """
    Output class for the semantic Stable Diffusion pipeline.

    NOTE(review): the scrambled original subclassed the undefined name
    `lowercase_` (a NameError at import time) and replaced both field
    annotations with the literal `42`. The base class is restored from the
    `BaseOutput` import visible above; the field names/types follow the
    standard diffusers pipeline-output convention — confirm against the
    sibling `pipeline_semantic_stable_diffusion` module.
    """

    # Denoised images: a list of PIL images or a (batch, height, width, channels) array.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags, or None when the safety checker was disabled.
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
92
"""Flip a YOLO-annotated image dataset horizontally or vertically.

NOTE(review): in the scrambled original all four functions were named
`_UpperCAmelCase` (each definition shadowing the previous one) while the
call sites used `main`, `get_dataset`, `update_image_and_anno` and
`random_chars`, and OpenCV was imported as the garbled alias `cva`.
This rewrite restores the names the call sites expect.
"""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    """Flip every image (and its annotations) and write the results to OUTPUT_DIR."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        # Bug fix: the original prefixed the path with a stray '/', writing to the
        # filesystem root instead of OUTPUT_DIR.
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            annos_list.append(f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}")
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and their YOLO boxes ([class, x, y, w, h] per object).

    Images whose label file contains no boxes are skipped.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    """Flip each image and mirror its box centers accordingly.

    flip_type == 1 mirrors horizontally (x center becomes 1 - x),
    flip_type == 0 mirrors vertically (y center becomes 1 - y).
    Returns (new images, new annotation lists, original paths).
    """
    new_imgs_list = []
    new_annos_lists = []
    path_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    """Return a random lowercase-alphanumeric string of length *number_char*."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
654
0
'''simple docstring''' from __future__ import annotations from statistics import mean def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCamelCase =[0] * no_of_processes _UpperCamelCase =[0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__SCREAMING_SNAKE_CASE ): _UpperCamelCase =burst_time[i] _UpperCamelCase =[] _UpperCamelCase =0 _UpperCamelCase =0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: _UpperCamelCase =[] _UpperCamelCase =-1 for i in range(__SCREAMING_SNAKE_CASE ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: _UpperCamelCase =ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: _UpperCamelCase =i total_time += burst_time[target_process] completed += 1 _UpperCamelCase =0 _UpperCamelCase =( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): """simple docstring""" _UpperCamelCase =[0] * no_of_processes for i in range(__SCREAMING_SNAKE_CASE ): _UpperCamelCase =burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print('[TEST CASE 01]') __lowerCamelCase : int = 4 __lowerCamelCase : List[str] = [2, 5, 3, 7] __lowerCamelCase : Union[str, Any] = [0, 0, 0, 0] __lowerCamelCase : Optional[int] = calculate_waitingtime(arrival_time, burst_time, no_of_processes) __lowerCamelCase : int = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time') for i, 
process_id in enumerate(list(range(1, 5))): print( F"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t""" F"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}""" ) print(F"""\nAverage waiting time = {mean(waiting_time):.5f}""") print(F"""Average turnaround time = {mean(turn_around_time):.5f}""")
404
"""Benchmark `datasets.Dataset.map`/`.filter` throughput and dump timings to JSON.

NOTE(review): in the scrambled original both timed wrappers were named
`_UpperCAmelCase` (the second shadowing the first) and the benchmark body
called the bare builtins `map(...)` / `filter(...)`, which have incompatible
signatures, while __main__ called the undefined `benchmark_map_filter`.
This rewrite restores consistent names. The wrappers deliberately shadow the
`map`/`filter` builtins so the recorded timing keys read naturally.
"""
import json
import os
import tempfile

import datasets
import transformers

from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def map(dataset: datasets.Dataset, **kwargs):  # noqa: A001 - intentional builtin shadow
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):  # noqa: A001 - intentional builtin shadow
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    """Time a battery of map/filter configurations and write them to RESULTS_FILE_PATH."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features(
            {"text": datasets.Value("string"), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
654
0
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class Blip2Processor(ProcessorMixin):
    """
    Bundle a BLIP image processor and a tokenizer into one processor object.

    NOTE(review): the scrambled original subclassed the undefined name
    `lowercase_` and every method body referenced the undefined name
    `UpperCamelCase__` instead of its own parameters; this rewrite restores
    coherent parameter usage (class name reconstructed from the declared
    BlipImageProcessor/AutoTokenizer pair — confirm against the package
    __init__).
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        # BLIP text models do not use token_type_ids.
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and/or preprocess `images`; at least one must be given.

        Returns the tokenizer output when only text is given, otherwise the
        image-processor output (with the text encoding merged in when both are
        present).
        """
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
647
"""Convert a timm DeiT checkpoint into the Hugging Face DeiT format.

NOTE(review): in the scrambled original all five helpers were named
`_UpperCAmelCase` (each definition shadowing the previous) while the call
sites used `create_rename_keys`, `read_in_q_k_v`, `rename_key`,
`prepare_img` and `convert_deit_checkpoint`; this rewrite restores those
names. The target key names in `read_in_q_k_v` were lost to scrambling and
are reconstructed from the HF DeiT attention layout — confirm against the
upstream conversion script.
"""
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Build (timm_name, hf_name) pairs for all weights that only need renaming."""
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )
    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split timm's fused qkv projection into separate query/key/value weights, in place."""
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move dct[old] to dct[new]."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download the standard COCO cats image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy the weights of *deit_name* (a timm checkpoint) into the HF DeiT structure,
    verify the logits match, and save model + image processor to *pytorch_dump_folder_path*.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # e.g. 'deit_base_distilled_patch16_224' -> patch_size 16, image_size 224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    # to maintain same ratio w.r.t. 224 images, see
    # https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    size = int((256 / 224) * config.image_size)
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
654
0
from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Optional[Any] = { "MIT/ast-finetuned-audioset-10-10-0.4593": ( "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json" ), } class a_ ( lowercase_ ): A = '''audio-spectrogram-transformer''' def __init__( self , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=0.0_2 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=1024 , SCREAMING_SNAKE_CASE=128 , **SCREAMING_SNAKE_CASE , ) -> List[Any]: """simple docstring""" super().__init__(**UpperCamelCase__ ) SCREAMING_SNAKE_CASE_ = hidden_size SCREAMING_SNAKE_CASE_ = num_hidden_layers SCREAMING_SNAKE_CASE_ = num_attention_heads SCREAMING_SNAKE_CASE_ = intermediate_size SCREAMING_SNAKE_CASE_ = hidden_act SCREAMING_SNAKE_CASE_ = hidden_dropout_prob SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE_ = initializer_range SCREAMING_SNAKE_CASE_ = layer_norm_eps SCREAMING_SNAKE_CASE_ = patch_size SCREAMING_SNAKE_CASE_ = qkv_bias SCREAMING_SNAKE_CASE_ = frequency_stride SCREAMING_SNAKE_CASE_ = time_stride SCREAMING_SNAKE_CASE_ = max_length SCREAMING_SNAKE_CASE_ = num_mel_bins
205
from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : torch.FloatTensor class _lowerCAmelCase ( lowercase_ , lowercase_ ): """simple docstring""" @register_to_config def __init__( self : Tuple , UpperCamelCase__ : int = 3_2 , UpperCamelCase__ : int = 6_4 , UpperCamelCase__ : int = 2_0 , UpperCamelCase__ : int = 7_6_8 , UpperCamelCase__ : Optional[Any]=7_7 , UpperCamelCase__ : str=4 , UpperCamelCase__ : float = 0.0 , UpperCamelCase__ : str = "silu" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = "linear" , UpperCamelCase__ : Optional[str] = "prd" , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[int] = None , ): '''simple docstring''' super().__init__() snake_case__ = num_attention_heads snake_case__ = attention_head_dim snake_case__ = num_attention_heads * attention_head_dim snake_case__ = additional_embeddings snake_case__ = time_embed_dim or inner_dim snake_case__ = embedding_proj_dim or embedding_dim snake_case__ = clip_embed_dim or embedding_dim snake_case__ = Timesteps(UpperCamelCase__ , UpperCamelCase__ , 0) snake_case__ = TimestepEmbedding(UpperCamelCase__ , UpperCamelCase__ , out_dim=UpperCamelCase__ , act_fn=UpperCamelCase__) snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__) if embedding_proj_norm_type is None: snake_case__ = None elif embedding_proj_norm_type == "layer": snake_case__ = nn.LayerNorm(UpperCamelCase__) else: raise 
ValueError(F'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''') snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__) if encoder_hid_proj_type is None: snake_case__ = None elif encoder_hid_proj_type == "linear": snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__) else: raise ValueError(F'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''') snake_case__ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , UpperCamelCase__)) if added_emb_type == "prd": snake_case__ = nn.Parameter(torch.zeros(1 , 1 , UpperCamelCase__)) elif added_emb_type is None: snake_case__ = None else: raise ValueError( F'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''') snake_case__ = nn.ModuleList( [ BasicTransformerBlock( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , dropout=UpperCamelCase__ , activation_fn="""gelu""" , attention_bias=UpperCamelCase__ , ) for d in range(UpperCamelCase__) ]) if norm_in_type == "layer": snake_case__ = nn.LayerNorm(UpperCamelCase__) elif norm_in_type is None: snake_case__ = None else: raise ValueError(F'''Unsupported norm_in_type: {norm_in_type}.''') snake_case__ = nn.LayerNorm(UpperCamelCase__) snake_case__ = nn.Linear(UpperCamelCase__ , UpperCamelCase__) snake_case__ = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0) causal_attention_mask.triu_(1) snake_case__ = causal_attention_mask[None, ...] 
self.register_buffer("""causal_attention_mask""" , UpperCamelCase__ , persistent=UpperCamelCase__) snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__)) snake_case__ = nn.Parameter(torch.zeros(1 , UpperCamelCase__)) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = {} def fn_recursive_add_processors(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Dict[str, AttentionProcessor]): if hasattr(UpperCamelCase__ , """set_processor"""): snake_case__ = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__) return processors for name, module in self.named_children(): fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) return processors def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]): '''simple docstring''' snake_case__ = len(self.attn_processors.keys()) if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count: raise ValueError( F'''A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the''' F''' number of attention layers: {count}. 
Please make sure to pass {count} processor classes.''') def fn_recursive_attn_processor(UpperCamelCase__ : str , UpperCamelCase__ : torch.nn.Module , UpperCamelCase__ : Optional[int]): if hasattr(UpperCamelCase__ , """set_processor"""): if not isinstance(UpperCamelCase__ , UpperCamelCase__): module.set_processor(UpperCamelCase__) else: module.set_processor(processor.pop(F'''{name}.processor''')) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F'''{name}.{sub_name}''' , UpperCamelCase__ , UpperCamelCase__) for name, module in self.named_children(): fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) def __magic_name__ ( self : Dict): '''simple docstring''' self.set_attn_processor(AttnProcessor()) def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[torch.Tensor, float, int] , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.BoolTensor] = None , UpperCamelCase__ : bool = True , ): '''simple docstring''' snake_case__ = hidden_states.shape[0] snake_case__ = timestep if not torch.is_tensor(UpperCamelCase__): snake_case__ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device) elif torch.is_tensor(UpperCamelCase__) and len(timesteps.shape) == 0: snake_case__ = timesteps[None].to(hidden_states.device) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML snake_case__ = timesteps * torch.ones(UpperCamelCase__ , dtype=timesteps.dtype , device=timesteps.device) snake_case__ = self.time_proj(UpperCamelCase__) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
snake_case__ = timesteps_projected.to(dtype=self.dtype) snake_case__ = self.time_embedding(UpperCamelCase__) if self.embedding_proj_norm is not None: snake_case__ = self.embedding_proj_norm(UpperCamelCase__) snake_case__ = self.embedding_proj(UpperCamelCase__) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: snake_case__ = self.encoder_hidden_states_proj(UpperCamelCase__) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError("""`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set""") snake_case__ = self.proj_in(UpperCamelCase__) snake_case__ = self.positional_embedding.to(hidden_states.dtype) snake_case__ = [] snake_case__ = 0 if encoder_hidden_states is not None: additional_embeds.append(UpperCamelCase__) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape) == 2: snake_case__ = proj_embeddings[:, None, :] if len(hidden_states.shape) == 2: snake_case__ = hidden_states[:, None, :] snake_case__ = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: snake_case__ = self.prd_embedding.to(hidden_states.dtype).expand(UpperCamelCase__ , -1 , -1) additional_embeds.append(UpperCamelCase__) snake_case__ = torch.cat( UpperCamelCase__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens snake_case__ = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: snake_case__ = F.pad( UpperCamelCase__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) snake_case__ = hidden_states + positional_embeddings if attention_mask is not None: snake_case__ = (1 - attention_mask.to(hidden_states.dtype)) * -1_00_00.0 snake_case__ = F.pad(UpperCamelCase__ , (0, 
self.additional_embeddings) , value=0.0) snake_case__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype) snake_case__ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0) if self.norm_in is not None: snake_case__ = self.norm_in(UpperCamelCase__) for block in self.transformer_blocks: snake_case__ = block(UpperCamelCase__ , attention_mask=UpperCamelCase__) snake_case__ = self.norm_out(UpperCamelCase__) if self.prd_embedding is not None: snake_case__ = hidden_states[:, -1] else: snake_case__ = hidden_states[:, additional_embeddings_len:] snake_case__ = self.proj_to_clip_embeddings(UpperCamelCase__) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=UpperCamelCase__) def __magic_name__ ( self : Any , UpperCamelCase__ : Any): '''simple docstring''' snake_case__ = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
654
0
'''simple docstring''' import tempfile import torch from diffusers import IPNDMScheduler from .test_schedulers import SchedulerCommonTest class lowercase_ (lowercase_ ): """simple docstring""" SCREAMING_SNAKE_CASE : int = (IPNDMScheduler,) SCREAMING_SNAKE_CASE : int = (('''num_inference_steps''', 5_0),) def SCREAMING_SNAKE_CASE ( self : Any ,**lowercase__ : Tuple ): __lowercase = {'''num_train_timesteps''': 1_0_0_0} config.update(**UpperCamelCase__ ) return config def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : Dict=0 ,**lowercase__ : int ): __lowercase = dict(self.forward_default_kwargs ) __lowercase = kwargs.pop('''num_inference_steps''' ,UpperCamelCase__ ) __lowercase = self.dummy_sample __lowercase = 0.1 * sample __lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: __lowercase = self.get_scheduler_config(**UpperCamelCase__ ) __lowercase = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals __lowercase = dummy_past_residuals[:] if time_step is None: __lowercase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase__ ) __lowercase = scheduler_class.from_pretrained(UpperCamelCase__ ) new_scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals __lowercase = dummy_past_residuals[:] __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample __lowercase = new_scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample __lowercase = new_scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ 
,**UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE ( self : List[Any] ): pass def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Union[str, Any]=0 ,**lowercase__ : Tuple ): __lowercase = dict(self.forward_default_kwargs ) __lowercase = kwargs.pop('''num_inference_steps''' ,UpperCamelCase__ ) __lowercase = self.dummy_sample __lowercase = 0.1 * sample __lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] for scheduler_class in self.scheduler_classes: __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**UpperCamelCase__ ) scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residuals (must be after setting timesteps) __lowercase = dummy_past_residuals[:] if time_step is None: __lowercase = scheduler.timesteps[len(scheduler.timesteps ) // 2] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase__ ) __lowercase = scheduler_class.from_pretrained(UpperCamelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCamelCase__ ) # copy over dummy past residual (must be after setting timesteps) __lowercase = dummy_past_residuals[:] __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample __lowercase = new_scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample __lowercase = new_scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,**lowercase__ : Dict 
): __lowercase = self.scheduler_classes[0] __lowercase = self.get_scheduler_config(**UpperCamelCase__ ) __lowercase = scheduler_class(**UpperCamelCase__ ) __lowercase = 1_0 __lowercase = self.dummy_model() __lowercase = self.dummy_sample_deter scheduler.set_timesteps(UpperCamelCase__ ) for i, t in enumerate(scheduler.timesteps ): __lowercase = model(UpperCamelCase__ ,UpperCamelCase__ ) __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ).prev_sample for i, t in enumerate(scheduler.timesteps ): __lowercase = model(UpperCamelCase__ ,UpperCamelCase__ ) __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ).prev_sample return sample def SCREAMING_SNAKE_CASE ( self : Optional[int] ): __lowercase = dict(self.forward_default_kwargs ) __lowercase = kwargs.pop('''num_inference_steps''' ,UpperCamelCase__ ) for scheduler_class in self.scheduler_classes: __lowercase = self.get_scheduler_config() __lowercase = scheduler_class(**UpperCamelCase__ ) __lowercase = self.dummy_sample __lowercase = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCamelCase__ ,'''set_timesteps''' ): scheduler.set_timesteps(UpperCamelCase__ ) elif num_inference_steps is not None and not hasattr(UpperCamelCase__ ,'''set_timesteps''' ): __lowercase = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5] __lowercase = dummy_past_residuals[:] __lowercase = scheduler.timesteps[5] __lowercase = scheduler.timesteps[6] __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ 
,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample __lowercase = scheduler.step(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,**UpperCamelCase__ ).prev_sample self.assertEqual(output_a.shape ,sample.shape ) self.assertEqual(output_a.shape ,output_a.shape ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=UpperCamelCase__ ,time_step=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE ( self : Dict ): for t, num_inference_steps in zip([1, 5, 1_0] ,[1_0, 5_0, 1_0_0] ): self.check_over_forward(num_inference_steps=UpperCamelCase__ ,time_step=UpperCamelCase__ ) def SCREAMING_SNAKE_CASE ( self : List[str] ): __lowercase = self.full_loop() __lowercase = torch.mean(torch.abs(UpperCamelCase__ ) ) assert abs(result_mean.item() - 2_5_4_0_5_2_9 ) < 1_0
41
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer a__ = ["""gpt2"""] a__ = """gpt2""" if is_tf_available(): class _lowerCAmelCase ( tf.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : int): '''simple docstring''' super().__init__() snake_case__ = tokenizer snake_case__ = AutoConfig.from_pretrained(UpperCamelCase__) snake_case__ = TFGPTaLMHeadModel.from_config(UpperCamelCase__) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name="""text"""),)) def __magic_name__ ( self : Tuple , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = self.tokenizer(UpperCamelCase__) snake_case__ = tokenized["""input_ids"""].to_tensor() snake_case__ = tf.cast(input_ids_dense > 0 , tf.intaa) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) snake_case__ = self.model(input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__)["""logits"""] return outputs @require_tf @require_keras_nlp class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __magic_name__ ( self : List[Any]): '''simple docstring''' super().setUp() snake_case__ = [GPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS)] snake_case__ = [TFGPTaTokenizer.from_pretrained(UpperCamelCase__) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers) == len(self.tf_tokenizers) snake_case__ = [ """This is a straightforward English test sentence.""", """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""", """Now we're going to add some Chinese: 一 二 三 一二三""", """And some much more rare 
Chinese: 齉 堃 齉堃""", """Je vais aussi écrire en français pour tester les accents""", """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""", ] snake_case__ = list(zip(self.test_sentences , self.test_sentences[::-1])) def __magic_name__ ( self : Optional[int]): '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers): for test_inputs in self.test_sentences: snake_case__ = tokenizer([test_inputs] , return_tensors="""tf""") snake_case__ = tf_tokenizer([test_inputs]) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors snake_case__ = python_outputs[key].numpy() snake_case__ = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape)) self.assertTrue(tf.reduce_all(tf.cast(UpperCamelCase__ , tf.intaa) == tf_outputs_values)) @slow def __magic_name__ ( self : Optional[int]): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: snake_case__ = tf.function(UpperCamelCase__) for test_inputs in self.test_sentences: snake_case__ = tf.constant(UpperCamelCase__) snake_case__ = compiled_tokenizer(UpperCamelCase__) snake_case__ = tf_tokenizer(UpperCamelCase__) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def __magic_name__ ( self : Optional[Any]): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: snake_case__ = ModelToSave(tokenizer=UpperCamelCase__) snake_case__ = tf.convert_to_tensor([self.test_sentences[0]]) snake_case__ = model.serving(UpperCamelCase__) # Build model with some sample inputs with TemporaryDirectory() as tempdir: snake_case__ = Path(UpperCamelCase__) / """saved.model""" tf.saved_model.save(UpperCamelCase__ , UpperCamelCase__ , signatures={"""serving_default""": model.serving}) snake_case__ = tf.saved_model.load(UpperCamelCase__) snake_case__ = 
loaded_model.signatures["""serving_default"""](UpperCamelCase__)["""output_0"""] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output)) @slow def __magic_name__ ( self : Tuple): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: snake_case__ = tf.convert_to_tensor([self.test_sentences[0]]) snake_case__ = tf_tokenizer(UpperCamelCase__) # Build model with some sample inputs snake_case__ = tf_tokenizer.get_config() snake_case__ = TFGPTaTokenizer.from_config(UpperCamelCase__) snake_case__ = model_from_config(UpperCamelCase__) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key])) @slow def __magic_name__ ( self : Dict): '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run snake_case__ = 1_2_3_1_2_3 for max_length in [3, 5, 1_0_2_4]: snake_case__ = tf.convert_to_tensor([self.test_sentences[0]]) snake_case__ = tf_tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__) snake_case__ = out["""input_ids"""].numpy().shape[1] assert out_length == max_length
654
0
"""simple docstring""" import unittest from pathlib import Path from shutil import copyfile from transformers import SPIECE_UNDERLINE, is_sentencepiece_available from transformers.models.speech_to_text import SpeechaTextTokenizer from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _A : List[str] = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_sentencepiece_available(): import sentencepiece as sp _A : str = 5 _A : Optional[int] = 10 @require_sentencepiece @require_tokenizers class a__ ( lowercase_, unittest.TestCase ): __lowerCAmelCase = SpeechaTextTokenizer __lowerCAmelCase = False __lowerCAmelCase = True def __magic_name__ ( self ): super().setUp() lowercase : str = sp.SentencePieceProcessor() spm_model.Load(UpperCamelCase__ ) lowercase : int = ["<s>", "<pad>", "</s>", "<unk>"] vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(UpperCamelCase__ ) )] lowercase : Optional[Any] = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) lowercase : Dict = Path(self.tmpdirname ) save_json(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(UpperCamelCase__ , save_dir / VOCAB_FILES_NAMES["spm_file"] ) lowercase : Optional[int] = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __magic_name__ ( self ): lowercase : Tuple = "<pad>" lowercase : Optional[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase__ ) , UpperCamelCase__ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase__ ) , UpperCamelCase__ ) def __magic_name__ ( self ): lowercase : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) 
self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(vocab_keys[-1] , "j" ) self.assertEqual(len(UpperCamelCase__ ) , 1_001 ) def __magic_name__ ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1_001 ) def __magic_name__ ( self ): lowercase : Tuple = SpeechaTextTokenizer.from_pretrained(self.tmpdirname ) lowercase : Tuple = tokenizer.tokenize("This is a test" ) self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [289, 50, 14, 174, 386] , ) lowercase : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( UpperCamelCase__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , ) lowercase : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] ) lowercase : int = tokenizer.convert_ids_to_tokens(UpperCamelCase__ ) self.assertListEqual( UpperCamelCase__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , ) @slow def __magic_name__ ( self ): lowercase : Dict = {"input_ids": [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 
37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , ) @require_sentencepiece class a__ ( unittest.TestCase ): __lowerCAmelCase = '''valhalla/s2t_mustc_multilinguial_medium''' __lowerCAmelCase = '''C\'est trop cool''' __lowerCAmelCase = '''Esto es genial''' @classmethod def __magic_name__ ( cls ): lowercase : List[str] = 
SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name ) return cls def __magic_name__ ( self ): self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 ) self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 ) self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 ) self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 ) def __magic_name__ ( self ): self.assertEqual(self.tokenizer.vocab_size , 10_000 ) def __magic_name__ ( self ): self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids ) lowercase : Union[str, Any] = [ES_CODE, 4, 1_601, 47, 7_647, 2] lowercase : List[Any] = self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ ) lowercase : Tuple = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ ) self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ ) def __magic_name__ ( self ): lowercase : Dict = "fr" lowercase : List[str] = self.tokenizer(self.french_text ).input_ids self.assertEqual(encoded[0] , UpperCamelCase__ ) self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id ) def __magic_name__ ( self ): lowercase : Union[str, Any] = "fr" self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] ) lowercase : str = "es" self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
361
import tempfile

import torch

from diffusers import IPNDMScheduler

from .test_schedulers import SchedulerCommonTest


class _lowerCAmelCase ( lowercase_ ):
    """Test suite for ``IPNDMScheduler``: save/load round-trips, step output
    shapes, and a deterministic full denoising loop.

    NOTE(review): this block looks machine-renamed -- many locals are
    assigned to ``snake_case__`` but read back under their original names
    (``config``, ``kwargs``, ``sample``, ``residual``, ...), and several
    signatures reuse ``UpperCamelCase__`` for both a positional and the
    ``**`` parameter, which is a SyntaxError.  Comments below describe the
    apparent intent; confirm against the pristine upstream file.
    """

    # Scheduler classes under test (presumably ``scheduler_classes``).
    _lowercase : int = (IPNDMScheduler,)
    # Default forward kwargs (presumably ``forward_default_kwargs``).
    # NOTE(review): reuses the attribute name ``_lowercase`` and therefore
    # shadows the tuple above.
    _lowercase : int = (('''num_inference_steps''', 50),)

    def __magic_name__ ( self : Any , **UpperCamelCase__ : Tuple):
        '''Build a scheduler config dict, overridable via keyword args.'''
        snake_case__ = {"""num_train_timesteps""": 1_0_0_0}
        # NOTE(review): ``config`` is never assigned under that name here --
        # presumably the dict above; verify.
        config.update(**UpperCamelCase__)
        return config

    # NOTE(review): ``UpperCamelCase__`` appears as both a positional and the
    # ``**`` parameter -- duplicate argument name (SyntaxError as written).
    def __magic_name__ ( self : int , UpperCamelCase__ : Dict=0 , **UpperCamelCase__ : int):
        '''Check a scheduler survives a save_config/from_pretrained round-trip:
        stepping the reloaded scheduler must match the original scheduler.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        # Residual is a scaled copy of the sample.
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                # Default to the middle timestep of the schedule.
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residuals
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            # Step a second time and compare again.
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : List[Any]):
        '''Intentional no-op (presumably disables an inherited common test).'''
        pass

    # NOTE(review): duplicate ``UpperCamelCase__`` parameter again
    # (SyntaxError as written).
    def __magic_name__ ( self : Tuple , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : Tuple):
        '''Same save/load round-trip check, but with the default scheduler
        config (the forward kwargs vary instead of the config).'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        snake_case__ = self.dummy_sample
        snake_case__ = 0.1 * sample
        snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            scheduler.set_timesteps(UpperCamelCase__)
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case__ = dummy_past_residuals[:]
            if time_step is None:
                snake_case__ = scheduler.timesteps[len(scheduler.timesteps) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCamelCase__)
                snake_case__ = scheduler_class.from_pretrained(UpperCamelCase__)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCamelCase__)
                # copy over dummy past residual (must be after setting timesteps)
                snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1E-5, "Scheduler outputs are not identical"

    def __magic_name__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict):
        '''Run two full passes of the denoising loop and return the sample.'''
        snake_case__ = self.scheduler_classes[0]
        snake_case__ = self.get_scheduler_config(**UpperCamelCase__)
        snake_case__ = scheduler_class(**UpperCamelCase__)
        snake_case__ = 1_0
        snake_case__ = self.dummy_model()
        snake_case__ = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCamelCase__)
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        # Second identical pass over the timesteps (presumably to exercise
        # the scheduler's internal history -- TODO confirm).
        for i, t in enumerate(scheduler.timesteps):
            snake_case__ = model(UpperCamelCase__ , UpperCamelCase__)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__).prev_sample
        return sample

    def __magic_name__ ( self : Optional[int]):
        '''Check that consecutive step() outputs have the sample's shape.'''
        snake_case__ = dict(self.forward_default_kwargs)
        snake_case__ = kwargs.pop("""num_inference_steps""" , UpperCamelCase__)
        for scheduler_class in self.scheduler_classes:
            snake_case__ = self.get_scheduler_config()
            snake_case__ = scheduler_class(**UpperCamelCase__)
            snake_case__ = self.dummy_sample
            snake_case__ = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCamelCase__ , """set_timesteps"""):
                scheduler.set_timesteps(UpperCamelCase__)
            elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , """set_timesteps"""):
                snake_case__ = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case__ = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            snake_case__ = dummy_past_residuals[:]
            snake_case__ = scheduler.timesteps[5]
            snake_case__ = scheduler.timesteps[6]
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            snake_case__ = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__).prev_sample
            self.assertEqual(output_a.shape , sample.shape)
            self.assertEqual(output_a.shape , output_a.shape)

    def __magic_name__ ( self : Union[str, Any]):
        '''Sweep num_train_timesteps values through the config check.'''
        for timesteps in [1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : Dict):
        '''Sweep (time_step, num_inference_steps) pairs through the forward check.'''
        for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0]):
            self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=UpperCamelCase__)

    def __magic_name__ ( self : List[str]):
        '''Regression test: mean |sample| of the full loop stays near a
        known reference value.'''
        snake_case__ = self.full_loop()
        snake_case__ = torch.mean(torch.abs(UpperCamelCase__))
        # NOTE(review): 2_5_4_0_5_2_9 == 2540529 and 1_0 == 10 -- presumably
        # the upstream reference value with an absolute tolerance of 10.
        assert abs(result_mean.item() - 2_5_4_0_5_2_9) < 1_0
654
0
"""Fetch GIF URLs from the Giphy search API."""
import requests

# Giphy API key placeholder -- replace with a real key before use.
lowerCAmelCase : str = 'YOUR API KEY'


def A_(query: str, api_key: str = lowerCAmelCase, timeout: float = 10) -> list:
    """Return the URLs of GIFs matching ``query`` from the Giphy search API.

    Fixes over the previous revision: the signature declared the parameter
    name ``A`` twice (a SyntaxError) and defaulted to the undefined name
    ``giphy_api_key``; the body read names (``query``, ``api_key``,
    ``formatted_query``) that were never bound. A request ``timeout`` was
    also added so a stalled connection cannot hang the caller.

    Args:
        query: Free-text search terms; whitespace is collapsed to ``+``.
        api_key: Giphy API key (defaults to the module-level placeholder).
        timeout: Seconds to wait for the HTTP response before raising.

    Returns:
        A list of GIF URL strings from the response's ``data`` field.

    Raises:
        requests.RequestException: on network failure or timeout.
        KeyError: if the response JSON has no ``data`` field.
    """
    # Giphy expects '+'-separated terms in the q parameter.
    formatted_query = '+'.join(query.split())
    url = f'https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'
    gifs = requests.get(url, timeout=timeout).json()['data']
    return [gif['url'] for gif in gifs]


if __name__ == "__main__":
    # The previous revision called the undefined name ``get_gifs`` here.
    print('\n'.join(A_('space ship')))
3
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch

from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool


if is_vision_available():
    from PIL import Image


class _lowerCAmelCase ( lowercase_ ):
    """Pipeline tool producing a binary segmentation mask for an image from a
    text label, backed by a CLIPSeg model.

    Fixes over the previous revision: the ``typing``-based class-attribute
    annotations referenced names that were never imported (NameError at class
    creation) and have been dropped; ``__init__`` declared ``*`` and ``**``
    parameters with the same name (SyntaxError); method bodies read names
    (``label``, ``image``, ``logits``, ``array``) that were never bound; and
    ``np.uinta`` does not exist (``np.uint8`` intended).

    NOTE(review): all three public methods below still share the name
    ``__magic_name__``, so only the last definition survives on the class --
    presumably these were ``encode``/``forward``/``decode`` originally;
    renaming them is an interface change left for a follow-up.
    """

    _lowercase = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    _lowercase = "CIDAS/clipseg-rd64-refined"
    _lowercase = "image_segmenter"
    _lowercase = CLIPSegForImageSegmentation
    _lowercase = ["image", "text"]
    _lowercase = ["image"]

    def __init__(self, *args, **kwargs):
        """Verify the vision backend (PIL) is installed, then defer to the base tool."""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def __magic_name__(self, image: "Image", label: str):
        """Preprocess an (image, label) pair into model-ready tensors."""
        # NOTE(review): padding value was lost in the previous revision;
        # restored as True per the upstream tool -- confirm.
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def __magic_name__(self, inputs):
        """Run the segmentation model without gradients and return its logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def __magic_name__(self, outputs):
        """Convert model logits into a binary (0/255) PIL mask image."""
        array = outputs.cpu().detach().numpy()
        # Threshold at zero: non-positive logits -> background, positive -> mask.
        # NOTE(review): the previous revision lost the masked assignments;
        # restored per the upstream tool -- confirm.
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
654
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel from ...schedulers import ScoreSdeVeScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class a__ ( lowercase_ ): snake_case__ = 42 snake_case__ = 42 def __init__( self : Union[str, Any] ,a__ : UNetaDModel ,a__ : ScoreSdeVeScheduler) -> Optional[int]: """simple docstring""" super().__init__() self.register_modules(unet=UpperCamelCase__ ,scheduler=UpperCamelCase__) @torch.no_grad() def __call__( self : Union[str, Any] ,a__ : int = 1 ,a__ : int = 2000 ,a__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,a__ : Optional[str] = "pil" ,a__ : bool = True ,**a__ : List[str] ,) -> Union[str, Any]: """simple docstring""" _lowerCAmelCase:str = self.unet.config.sample_size _lowerCAmelCase:Optional[int] = (batch_size, 3, img_size, img_size) _lowerCAmelCase:str = self.unet _lowerCAmelCase:Any = randn_tensor(UpperCamelCase__ ,generator=UpperCamelCase__) * self.scheduler.init_noise_sigma _lowerCAmelCase:str = sample.to(self.device) self.scheduler.set_timesteps(UpperCamelCase__) self.scheduler.set_sigmas(UpperCamelCase__) for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)): _lowerCAmelCase:Union[str, Any] = self.scheduler.sigmas[i] * torch.ones(shape[0] ,device=self.device) # correction step for _ in range(self.scheduler.config.correct_steps): _lowerCAmelCase:Union[str, Any] = self.unet(UpperCamelCase__ ,UpperCamelCase__).sample _lowerCAmelCase:List[Any] = self.scheduler.step_correct(UpperCamelCase__ ,UpperCamelCase__ ,generator=UpperCamelCase__).prev_sample # prediction step _lowerCAmelCase:Dict = model(UpperCamelCase__ ,UpperCamelCase__).sample _lowerCAmelCase:Optional[Any] = self.scheduler.step_pred(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,generator=UpperCamelCase__) _lowerCAmelCase , _lowerCAmelCase:Optional[Any] = output.prev_sample, output.prev_sample_mean 
_lowerCAmelCase:Optional[int] = sample_mean.clamp(0 ,1) _lowerCAmelCase:Tuple = sample.cpu().permute(0 ,2 ,3 ,1).numpy() if output_type == "pil": _lowerCAmelCase:int = self.numpy_to_pil(UpperCamelCase__) if not return_dict: return (sample,) return ImagePipelineOutput(images=UpperCamelCase__)
227
import unittest import numpy as np from transformers.testing_utils import require_pytesseract, require_torch from transformers.utils import is_pytesseract_available, is_torch_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_pytesseract_available(): from PIL import Image from transformers import LayoutLMvaImageProcessor class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=7 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : Dict=1_8 , UpperCamelCase__ : Any=3_0 , UpperCamelCase__ : List[Any]=4_0_0 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Any=None , UpperCamelCase__ : Optional[int]=True , ): '''simple docstring''' snake_case__ = size if size is not None else {"""height""": 1_8, """width""": 1_8} snake_case__ = parent snake_case__ = batch_size snake_case__ = num_channels snake_case__ = image_size snake_case__ = min_resolution snake_case__ = max_resolution snake_case__ = do_resize snake_case__ = size snake_case__ = apply_ocr def __magic_name__ ( self : Optional[Any]): '''simple docstring''' return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr} @require_torch @require_pytesseract class _lowerCAmelCase ( lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : str = LayoutLMvaImageProcessor if is_pytesseract_available() else None def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = LayoutLMvaImageProcessingTester(self) @property def __magic_name__ ( self : Tuple): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__ ( self : List[Any]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""")) 
self.assertTrue(hasattr(UpperCamelCase__ , """size""")) self.assertTrue(hasattr(UpperCamelCase__ , """apply_ocr""")) def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size , {"""height""": 1_8, """width""": 1_8}) snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2) self.assertEqual(image_processor.size , {"""height""": 4_2, """width""": 4_2}) def __magic_name__ ( self : List[str]): '''simple docstring''' pass def __magic_name__ ( self : List[str]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random PIL images snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""") self.assertEqual( encoding.pixel_values.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) self.assertIsInstance(encoding.words , UpperCamelCase__) self.assertIsInstance(encoding.boxes , UpperCamelCase__) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : List[Any]): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__) for image in image_inputs: 
self.assertIsInstance(UpperCamelCase__ , np.ndarray) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : Dict): '''simple docstring''' snake_case__ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor) # Test not batched input snake_case__ = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) # Test batched snake_case__ = image_processing(UpperCamelCase__ , return_tensors="""pt""").pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["""height"""], self.image_processor_tester.size["""width"""], ) , ) def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = LayoutLMvaImageProcessor() from datasets import load_dataset snake_case__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""") snake_case__ = Image.open(ds[0]["""file"""]).convert("""RGB""") snake_case__ = 
image_processing(UpperCamelCase__ , return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4)) self.assertEqual(len(encoding.words) , len(encoding.boxes)) # fmt: off # the words and boxes were obtained with Tesseract 4.1.1 snake_case__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", 
"""Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231 snake_case__ = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 
1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 
5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231 # fmt: on self.assertListEqual(encoding.words , UpperCamelCase__) self.assertListEqual(encoding.boxes , UpperCamelCase__) # with apply_OCR = False snake_case__ = LayoutLMvaImageProcessor(apply_ocr=UpperCamelCase__) snake_case__ = image_processing(UpperCamelCase__ , 
return_tensors="""pt""") self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4))
654
0
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _A ( unittest.TestCase ): snake_case__ : Dict = JukeboxTokenizer snake_case__ : str = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def A__ ( self ): """simple docstring""" import torch lowercase = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""" ) lowercase = tokenizer(**self.metas )["""input_ids"""] # fmt: off lowercase = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 
34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 
31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def A__ ( self ): """simple docstring""" import torch lowercase = JukeboxTokenizer.from_pretrained("""openai/jukebox-5b-lyrics""" ) lowercase = tokenizer(**self.metas )["""input_ids"""] # fmt: off lowercase = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 
27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , 
EXPECTED_OUTPUT[2] ) )
359
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class _lowerCAmelCase ( lowercase_ ): """simple docstring""" def __init__( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = params snake_case__ = np.array(UpperCamelCase__) snake_case__ = np.array([len(UpperCamelCase__) for t in data]) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Dict , UpperCamelCase__ : Any): '''simple docstring''' return (self.token_ids[index], self.lengths[index]) def __len__( self : Union[str, Any]): '''simple docstring''' return len(self.lengths) def __magic_name__ ( self : str): '''simple docstring''' assert len(self.token_ids) == len(self.lengths) assert all(self.lengths[i] == len(self.token_ids[i]) for i in range(len(self.lengths))) def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.params.max_model_input_size snake_case__ = self.lengths > max_len logger.info(F'''Splitting {sum(UpperCamelCase__)} too long sequences.''') def divide_chunks(UpperCamelCase__ : str , UpperCamelCase__ : Tuple): return [l[i : i + n] for i in range(0 , len(UpperCamelCase__) , UpperCamelCase__)] snake_case__ = [] snake_case__ = [] if self.params.mlm: snake_case__ , snake_case__ = self.params.special_tok_ids["""cls_token"""], self.params.special_tok_ids["""sep_token"""] else: snake_case__ , snake_case__ = self.params.special_tok_ids["""bos_token"""], self.params.special_tok_ids["""eos_token"""] for seq_, len_ in zip(self.token_ids , self.lengths): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_) new_lengths.append(len_) else: snake_case__ = [] for sub_s in divide_chunks(seq_ , max_len - 2): if sub_s[0] != cls_id: snake_case__ = np.insert(UpperCamelCase__ , 0 , UpperCamelCase__) if sub_s[-1] != 
sep_id: snake_case__ = np.insert(UpperCamelCase__ , len(UpperCamelCase__) , UpperCamelCase__) assert len(UpperCamelCase__) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(UpperCamelCase__) new_tok_ids.extend(UpperCamelCase__) new_lengths.extend([len(UpperCamelCase__) for l in sub_seqs]) snake_case__ = np.array(UpperCamelCase__) snake_case__ = np.array(UpperCamelCase__) def __magic_name__ ( self : Any): '''simple docstring''' snake_case__ = len(self) snake_case__ = self.lengths > 1_1 snake_case__ = self.token_ids[indices] snake_case__ = self.lengths[indices] snake_case__ = len(self) logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''') def __magic_name__ ( self : List[str]): '''simple docstring''' if "unk_token" not in self.params.special_tok_ids: return else: snake_case__ = self.params.special_tok_ids["""unk_token"""] snake_case__ = len(self) snake_case__ = np.array([np.count_nonzero(a == unk_token_id) for a in self.token_ids]) snake_case__ = (unk_occs / self.lengths) < 0.5 snake_case__ = self.token_ids[indices] snake_case__ = self.lengths[indices] snake_case__ = len(self) logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''') def __magic_name__ ( self : Optional[Any]): '''simple docstring''' if not self.params.is_master: return logger.info(F'''{len(self)} sequences''') # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __magic_name__ ( self : int , UpperCamelCase__ : Optional[int]): '''simple docstring''' snake_case__ = [t[0] for t in batch] snake_case__ = [t[1] for t in batch] assert len(UpperCamelCase__) == 
len(UpperCamelCase__) # Max for paddings snake_case__ = max(UpperCamelCase__) # Pad token ids if self.params.mlm: snake_case__ = self.params.special_tok_ids["""pad_token"""] else: snake_case__ = self.params.special_tok_ids["""unk_token"""] snake_case__ = [list(t.astype(UpperCamelCase__)) + [pad_idx] * (max_seq_len_ - len(UpperCamelCase__)) for t in token_ids] assert len(tk_) == len(UpperCamelCase__) assert all(len(UpperCamelCase__) == max_seq_len_ for t in tk_) snake_case__ = torch.tensor(tk_) # (bs, max_seq_len_) snake_case__ = torch.tensor(UpperCamelCase__) # (bs) return tk_t, lg_t
654
0
'''simple docstring''' def _UpperCamelCase ( __UpperCamelCase ) -> List[str]: return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def _UpperCamelCase ( __UpperCamelCase ) -> List[str]: lowerCamelCase_ = 0 lowerCamelCase_ = len(__UpperCamelCase ) # No of vertices in graph lowerCamelCase_ = [0] * n lowerCamelCase_ = [False] * n def dfs(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): lowerCamelCase_ = True lowerCamelCase_ = id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,id_ ) lowerCamelCase_ = min(low[at] ,low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge lowerCamelCase_ = min(low[at] ,low[to] ) lowerCamelCase_ = [] for i in range(__UpperCamelCase ): if not visited[i]: dfs(__UpperCamelCase ,-1 ,__UpperCamelCase ,id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
42
import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def _UpperCAmelCase ( a : str ): if "model" in orig_key: snake_case__ = orig_key.replace("""model.""" , """""" ) if "norm1" in orig_key: snake_case__ = orig_key.replace("""norm1""" , """attention.output.LayerNorm""" ) if "norm2" in orig_key: snake_case__ = orig_key.replace("""norm2""" , """output.LayerNorm""" ) if "norm" in orig_key: snake_case__ = orig_key.replace("""norm""" , """LayerNorm""" ) if "transformer" in orig_key: snake_case__ = orig_key.split(""".""" )[0].split("""_""" )[-1] snake_case__ = orig_key.replace(F'''transformer_{layer_num}''' , F'''encoder.layer.{layer_num}''' ) if "mha.attn" in orig_key: snake_case__ = orig_key.replace("""mha.attn""" , """attention.self""" ) if "mha" in orig_key: snake_case__ = orig_key.replace("""mha""" , """attention""" ) if "W_q" in orig_key: snake_case__ = orig_key.replace("""W_q""" , """self.query""" ) if "W_k" in orig_key: snake_case__ = orig_key.replace("""W_k""" , """self.key""" ) if "W_v" in orig_key: snake_case__ = orig_key.replace("""W_v""" , """self.value""" ) if "ff1" in orig_key: snake_case__ = orig_key.replace("""ff1""" , """intermediate.dense""" ) if "ff2" in orig_key: snake_case__ = orig_key.replace("""ff2""" , """output.dense""" ) if "ff" in orig_key: snake_case__ = orig_key.replace("""ff""" , """output.dense""" ) if "mlm_class" in orig_key: snake_case__ = orig_key.replace("""mlm.mlm_class""" , """cls.predictions.decoder""" ) if "mlm" in orig_key: snake_case__ = orig_key.replace("""mlm""" , """cls.predictions.transform""" ) if "cls" not in orig_key: snake_case__ = """yoso.""" + orig_key return orig_key def _UpperCAmelCase ( a : Tuple , a : Dict ): for key in orig_state_dict.copy().keys(): snake_case__ = orig_state_dict.pop(a ) if ("pooler" in key) or ("sen_class" in key): continue else: snake_case__ = val snake_case__ = orig_state_dict["""cls.predictions.decoder.bias"""] snake_case__ = torch.arange(a ).expand((1, -1) ) + 2 
return orig_state_dict def _UpperCAmelCase ( a : int , a : List[Any] , a : List[Any] ): snake_case__ = torch.load(a , map_location="""cpu""" )["""model_state_dict"""] snake_case__ = YosoConfig.from_json_file(a ) snake_case__ = YosoForMaskedLM(a ) snake_case__ = convert_checkpoint_helper(config.max_position_embeddings , a ) print(model.load_state_dict(a ) ) model.eval() model.save_pretrained(a ) print(F'''Checkpoint successfuly converted. Model saved at {pytorch_dump_path}''' ) if __name__ == "__main__": a__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for YOSO model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) a__ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
654
0
def __lowercase( UpperCAmelCase__ = 3 , UpperCAmelCase__ = 7 , UpperCAmelCase__ = 1000000 ): """simple docstring""" lowerCamelCase = 0 lowerCamelCase = 1 for current_denominator in range(1 , limit + 1 ): lowerCamelCase = current_denominator * numerator // denominator if current_denominator % denominator == 0: current_numerator -= 1 if current_numerator * max_denominator > current_denominator * max_numerator: lowerCamelCase = current_numerator lowerCamelCase = current_denominator return max_numerator if __name__ == "__main__": print(solution(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
623
import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : Optional[int] = '''''' _lowercase : str = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) _lowercase : str = None # compression type in fsspec. ex: "gzip" _lowercase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[Any] , UpperCamelCase__ : str = "" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , **UpperCamelCase__ : List[Any]): '''simple docstring''' super().__init__(self , **UpperCamelCase__) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode snake_case__ = fsspec.open( UpperCamelCase__ , mode="""rb""" , protocol=UpperCamelCase__ , compression=self.compression , client_kwargs={ """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459 """trust_env""": True, # Enable reading proxy env variables. **(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed. 
} , **(target_options or {}) , ) snake_case__ = os.path.basename(self.file.path.split("""::""")[0]) snake_case__ = ( self.compressed_name[: self.compressed_name.rindex(""".""")] if """.""" in self.compressed_name else self.compressed_name ) snake_case__ = None @classmethod def __magic_name__ ( cls : Union[str, Any] , UpperCamelCase__ : List[Any]): '''simple docstring''' return super()._strip_protocol(UpperCamelCase__).lstrip("""/""") def __magic_name__ ( self : Dict): '''simple docstring''' if self.dir_cache is None: snake_case__ = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name} snake_case__ = {f["""name"""]: f} def __magic_name__ ( self : Optional[int] , UpperCamelCase__ : str): '''simple docstring''' return self.file.open().read() def __magic_name__ ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Optional[Any] , ): '''simple docstring''' snake_case__ = self._strip_protocol(UpperCamelCase__) if mode != "rb": raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''') return self.file.open() class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : Dict = '''bz2''' _lowercase : Dict = '''bz2''' _lowercase : Optional[int] = '''.bz2''' class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : Dict = '''gzip''' _lowercase : List[str] = '''gzip''' _lowercase : Any = '''.gz''' class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : str = '''lz4''' _lowercase : List[Any] = '''lz4''' _lowercase : Dict = '''.lz4''' class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : Optional[int] = '''xz''' _lowercase : Union[str, Any] = '''xz''' _lowercase : Optional[int] = '''.xz''' class _lowerCAmelCase ( lowercase_ ): """simple docstring""" _lowercase : Optional[int] = '''zstd''' _lowercase 
: Tuple = '''zstd''' _lowercase : Union[str, Any] = '''.zst''' def __init__( self : str , UpperCamelCase__ : str , UpperCamelCase__ : str = "rb" , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[dict] = None , UpperCamelCase__ : int = DEFAULT_BLOCK_SIZE , **UpperCamelCase__ : int , ): '''simple docstring''' super().__init__( fo=UpperCamelCase__ , mode=UpperCamelCase__ , target_protocol=UpperCamelCase__ , target_options=UpperCamelCase__ , block_size=UpperCamelCase__ , **UpperCamelCase__ , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 snake_case__ = self.file.__enter__ class _lowerCAmelCase : """simple docstring""" def __init__( self : Tuple , UpperCamelCase__ : str): '''simple docstring''' snake_case__ = file_ def __enter__( self : List[str]): '''simple docstring''' self._file.__enter__() return self def __exit__( self : Dict , *UpperCamelCase__ : str , **UpperCamelCase__ : Optional[Any]): '''simple docstring''' self._file.__exit__(*UpperCamelCase__ , **UpperCamelCase__) def __iter__( self : Any): '''simple docstring''' return iter(self._file) def __magic_name__ ( self : List[str]): '''simple docstring''' return next(self._file) def __getattr__( self : Any , UpperCamelCase__ : int): '''simple docstring''' return getattr(self._file , UpperCamelCase__) def fixed_enter(*UpperCamelCase__ : int , **UpperCamelCase__ : int): return WrappedFile(_enter(*UpperCamelCase__ , **UpperCamelCase__)) snake_case__ = fixed_enter
654
0
'''simple docstring''' import unittest from knapsack import knapsack as k class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def lowerCamelCase_ ( self : Optional[Any] ): '''simple docstring''' lowercase : str =0 lowercase : Any =[0] lowercase : Tuple =[0] lowercase : Any =len(UpperCamelCase__ ) self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 0 ) lowercase : Tuple =[60] lowercase : Optional[Any] =[10] lowercase : Dict =len(UpperCamelCase__ ) self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 0 ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' lowercase : int =3 lowercase : int =[1, 2, 3] lowercase : List[Any] =[3, 2, 1] lowercase : Any =len(UpperCamelCase__ ) self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 5 ) def lowerCamelCase_ ( self : Dict ): '''simple docstring''' lowercase : Any =50 lowercase : List[str] =[60, 100, 120] lowercase : Any =[10, 20, 30] lowercase : List[str] =len(UpperCamelCase__ ) self.assertEqual(k.knapsack(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , 220 ) if __name__ == "__main__": unittest.main()
92
def _UpperCAmelCase ( a : int ): if number < 0: raise ValueError("""number must not be negative""" ) return number & (number - 1) == 0 if __name__ == "__main__": import doctest doctest.testmod()
654
0
'''simple docstring''' import cva import numpy as np class UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , UpperCamelCase__ : float , UpperCamelCase__ : int ) -> str: if k in (0.04, 0.06): _UpperCamelCase =k _UpperCamelCase =window_size else: raise ValueError('''invalid k value''' ) def __str__( self : List[Any] ) -> Union[str, Any]: return str(self.k ) def UpperCamelCase__ ( self : Optional[Any] , UpperCamelCase__ : str ) -> Tuple: _UpperCamelCase =cva.imread(UpperCamelCase__ , 0 ) _UpperCamelCase , _UpperCamelCase =img.shape _UpperCamelCase =[] _UpperCamelCase =img.copy() _UpperCamelCase =cva.cvtColor(UpperCamelCase__ , cva.COLOR_GRAY2RGB ) _UpperCamelCase , _UpperCamelCase =np.gradient(UpperCamelCase__ ) _UpperCamelCase =dx**2 _UpperCamelCase =dy**2 _UpperCamelCase =dx * dy _UpperCamelCase =0.04 _UpperCamelCase =self.window_size // 2 for y in range(UpperCamelCase__ , h - offset ): for x in range(UpperCamelCase__ , w - offset ): _UpperCamelCase =ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _UpperCamelCase =iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _UpperCamelCase =ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() _UpperCamelCase =(wxx * wyy) - (wxy**2) _UpperCamelCase =wxx + wyy _UpperCamelCase =det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r] ) color_img.itemset((y, x, 0) , 0 ) color_img.itemset((y, x, 1) , 0 ) color_img.itemset((y, x, 2) , 255 ) return color_img, corner_list if __name__ == "__main__": __lowerCamelCase : List[str] = HarrisCorner(0.0_4, 3) __lowerCamelCase , __lowerCamelCase : Dict = edge_detect.detect('path_to_image') cva.imwrite('detect.png', color_img)
404
class _lowerCAmelCase : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = size snake_case__ = [0] * size snake_case__ = [0] * size @staticmethod def __magic_name__ ( UpperCamelCase__ : int): '''simple docstring''' return index | (index + 1) @staticmethod def __magic_name__ ( UpperCamelCase__ : int): '''simple docstring''' return (index & (index + 1)) - 1 def __magic_name__ ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int): '''simple docstring''' snake_case__ = value while index < self.size: snake_case__ = self.get_prev(UpperCamelCase__) + 1 if current_left_border == index: snake_case__ = value else: snake_case__ = max(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) snake_case__ = self.get_next(UpperCamelCase__) def __magic_name__ ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int): '''simple docstring''' right -= 1 # Because of right is exclusive snake_case__ = 0 while left <= right: snake_case__ = self.get_prev(UpperCamelCase__) if left <= current_left: snake_case__ = max(UpperCamelCase__ , self.tree[right]) snake_case__ = current_left else: snake_case__ = max(UpperCamelCase__ , self.arr[right]) right -= 1 return result if __name__ == "__main__": import doctest doctest.testmod()
654
0
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class lowerCamelCase__ ( lowercase_): '''simple docstring''' def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=7 ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=False ,__lowerCamelCase=True ,__lowerCamelCase=99 ,__lowerCamelCase=32 ,__lowerCamelCase=5 ,__lowerCamelCase=4 ,__lowerCamelCase=64 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=5_12 ,__lowerCamelCase=16 ,__lowerCamelCase=2 ,__lowerCamelCase=0.02 ,__lowerCamelCase=3 ,__lowerCamelCase=4 ,__lowerCamelCase=None ,__lowerCamelCase=2 ,__lowerCamelCase=2 ,__lowerCamelCase=2 ,__lowerCamelCase=2 ,__lowerCamelCase=4 ,__lowerCamelCase=1 ,) -> List[str]: """simple docstring""" lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : Tuple = seq_length lowerCAmelCase__ : List[Any] = is_training lowerCAmelCase__ : Dict = use_input_mask lowerCAmelCase__ : Optional[int] = use_token_type_ids lowerCAmelCase__ : Dict = use_labels lowerCAmelCase__ : Dict = vocab_size lowerCAmelCase__ : Union[str, Any] = hidden_size lowerCAmelCase__ : Tuple = num_hidden_layers lowerCAmelCase__ : List[Any] = num_attention_heads lowerCAmelCase__ : Dict = intermediate_size lowerCAmelCase__ : Union[str, Any] = hidden_act lowerCAmelCase__ : 
Optional[int] = hidden_dropout_prob lowerCAmelCase__ : Dict = attention_probs_dropout_prob lowerCAmelCase__ : List[str] = max_position_embeddings lowerCAmelCase__ : str = type_vocab_size lowerCAmelCase__ : Union[str, Any] = type_sequence_label_size lowerCAmelCase__ : Dict = initializer_range lowerCAmelCase__ : Any = num_labels lowerCAmelCase__ : Optional[Any] = num_choices lowerCAmelCase__ : Tuple = scope lowerCAmelCase__ : Optional[Any] = q_groups lowerCAmelCase__ : int = k_groups lowerCAmelCase__ : Union[str, Any] = v_groups lowerCAmelCase__ : str = post_attention_groups lowerCAmelCase__ : List[str] = intermediate_groups lowerCAmelCase__ : int = output_groups def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" lowerCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowerCAmelCase__ : str = None if self.use_input_mask: lowerCAmelCase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Any = None lowerCAmelCase__ : Optional[Any] = None lowerCAmelCase__ : Optional[Any] = None if self.use_labels: lowerCAmelCase__ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowerCAmelCase__ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowerCAmelCase__ : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" return SqueezeBertConfig( embedding_size=self.hidden_size ,vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,attention_probs_dropout_prob=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings 
,initializer_range=self.initializer_range ,q_groups=self.q_groups ,k_groups=self.k_groups ,v_groups=self.v_groups ,post_attention_groups=self.post_attention_groups ,intermediate_groups=self.intermediate_groups ,output_groups=self.output_groups ,) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Any: """simple docstring""" lowerCAmelCase__ : Tuple = SqueezeBertModel(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : Optional[Any] = model(UpperCamelCase__ ,UpperCamelCase__ ) lowerCAmelCase__ : Tuple = model(UpperCamelCase__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> int: """simple docstring""" lowerCAmelCase__ : Tuple = SqueezeBertForMaskedLM(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : Optional[Any] = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> str: """simple docstring""" lowerCAmelCase__ : Union[str, Any] = SqueezeBertForQuestionAnswering(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : str = model( UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,start_positions=UpperCamelCase__ ,end_positions=UpperCamelCase__ ) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase 
,__lowerCamelCase ,__lowerCamelCase ) -> List[str]: """simple docstring""" lowerCAmelCase__ : int = self.num_labels lowerCAmelCase__ : str = SqueezeBertForSequenceClassification(UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : Any = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Dict: """simple docstring""" lowerCAmelCase__ : Any = self.num_labels lowerCAmelCase__ : Any = SqueezeBertForTokenClassification(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : str = model(UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,labels=UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : Dict = self.num_choices lowerCAmelCase__ : Any = SqueezeBertForMultipleChoice(config=UpperCamelCase__ ) model.to(UpperCamelCase__ ) model.eval() lowerCAmelCase__ : Dict = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowerCAmelCase__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowerCAmelCase__ : int = model( UpperCamelCase__ ,attention_mask=UpperCamelCase__ ,labels=UpperCamelCase__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : str = self.prepare_config_and_inputs() ((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : int = config_and_inputs lowerCAmelCase__ : 
List[str] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( lowercase_ , lowercase_ , unittest.TestCase): '''simple docstring''' snake_case_ =( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) snake_case_ =( { '''feature-extraction''': SqueezeBertModel, '''fill-mask''': SqueezeBertForMaskedLM, '''question-answering''': SqueezeBertForQuestionAnswering, '''text-classification''': SqueezeBertForSequenceClassification, '''token-classification''': SqueezeBertForTokenClassification, '''zero-shot''': SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) snake_case_ =False snake_case_ =True snake_case_ =False def lowerCAmelCase__ (self ) -> Any: """simple docstring""" lowerCAmelCase__ : Dict = SqueezeBertModelTester(self ) lowerCAmelCase__ : Optional[int] = ConfigTester(self ,config_class=UpperCamelCase__ ,dim=37 ) def lowerCAmelCase__ (self ) -> str: """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase__ (self ) -> Union[str, Any]: """simple docstring""" lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*UpperCamelCase__ ) def lowerCAmelCase__ (self ) -> Optional[Any]: """simple docstring""" lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCamelCase__ ) def lowerCAmelCase__ (self ) -> str: """simple docstring""" lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCamelCase__ ) def lowerCAmelCase__ (self ) -> List[str]: """simple docstring""" lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCamelCase__ ) def lowerCAmelCase__ (self ) -> Optional[int]: """simple docstring""" lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCamelCase__ ) def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCamelCase__ ) @slow def lowerCAmelCase__ (self ) -> int: """simple docstring""" for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : List[str] = SqueezeBertModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @require_sentencepiece @require_tokenizers @require_torch class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' @slow def lowerCAmelCase__ (self ) -> List[Any]: """simple docstring""" lowerCAmelCase__ : List[Any] = SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' ) lowerCAmelCase__ : int = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] ) lowerCAmelCase__ : str = model(UpperCamelCase__ )[0] lowerCAmelCase__ : Tuple = torch.Size((1, 3) ) self.assertEqual(output.shape ,UpperCamelCase__ ) lowerCAmelCase__ : Any = torch.tensor([[0.6401, -0.0349, -0.6041]] ) self.assertTrue(torch.allclose(UpperCamelCase__ ,UpperCamelCase__ ,atol=1e-4 ) )
647
from __future__ import annotations import unittest from transformers import AutoTokenizer, PegasusConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel @require_tf class _lowerCAmelCase : """simple docstring""" _lowercase : List[str] = PegasusConfig _lowercase : Union[str, Any] = {} _lowercase : Tuple = '''gelu''' def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]=1_3 , UpperCamelCase__ : Any=7 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : int=9_9 , UpperCamelCase__ : Dict=3_2 , UpperCamelCase__ : str=2 , UpperCamelCase__ : int=4 , UpperCamelCase__ : Tuple=3_7 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : str=4_0 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Dict=0 , ): '''simple docstring''' snake_case__ = parent snake_case__ = batch_size snake_case__ = seq_length snake_case__ = is_training snake_case__ = use_labels snake_case__ = vocab_size snake_case__ = hidden_size snake_case__ = num_hidden_layers snake_case__ = num_attention_heads snake_case__ = intermediate_size snake_case__ = hidden_dropout_prob snake_case__ = attention_probs_dropout_prob snake_case__ = max_position_embeddings snake_case__ = eos_token_id snake_case__ = pad_token_id snake_case__ = bos_token_id def __magic_name__ ( self : Optional[Any]): '''simple docstring''' snake_case__ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) snake_case__ = 
tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size) , 1) snake_case__ = tf.concat([input_ids, eos_tensor] , axis=1) snake_case__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) snake_case__ = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) snake_case__ = prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) return config, inputs_dict def __magic_name__ ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any]): '''simple docstring''' snake_case__ = TFPegasusModel(config=UpperCamelCase__).get_decoder() snake_case__ = inputs_dict["""input_ids"""] snake_case__ = input_ids[:1, :] snake_case__ = inputs_dict["""attention_mask"""][:1, :] snake_case__ = inputs_dict["""head_mask"""] snake_case__ = 1 # first forward pass snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__) snake_case__ , snake_case__ = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids snake_case__ = ids_tensor((self.batch_size, 3) , config.vocab_size) snake_case__ = tf.cast(ids_tensor((self.batch_size, 3) , 2) , tf.inta) # append to next input_ids and snake_case__ = tf.concat([input_ids, next_tokens] , axis=-1) snake_case__ = tf.concat([attention_mask, next_attn_mask] , axis=-1) snake_case__ = model(UpperCamelCase__ , 
attention_mask=UpperCamelCase__)[0] snake_case__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__)[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1]) # select random slice snake_case__ = int(ids_tensor((1,) , output_from_past.shape[-1])) snake_case__ = output_from_no_past[:, -3:, random_slice_idx] snake_case__ = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3) def _UpperCAmelCase ( a : str , a : Union[str, Any] , a : List[str] , a : str=None , a : int=None , a : int=None , a : int=None , a : Optional[int]=None , ): if attention_mask is None: snake_case__ = tf.cast(tf.math.not_equal(a , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: snake_case__ = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: snake_case__ = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: snake_case__ = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class _lowerCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ): """simple docstring""" _lowercase : int = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else () _lowercase : List[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else () _lowercase : List[Any] = ( { '''conversational''': 
TFPegasusForConditionalGeneration, '''feature-extraction''': TFPegasusModel, '''summarization''': TFPegasusForConditionalGeneration, '''text2text-generation''': TFPegasusForConditionalGeneration, '''translation''': TFPegasusForConditionalGeneration, } if is_tf_available() else {} ) _lowercase : Optional[int] = True _lowercase : Dict = False _lowercase : Any = False def __magic_name__ ( self : str): '''simple docstring''' snake_case__ = TFPegasusModelTester(self) snake_case__ = ConfigTester(self , config_class=UpperCamelCase__) def __magic_name__ ( self : List[Any]): '''simple docstring''' self.config_tester.run_common_tests() def __magic_name__ ( self : Optional[int]): '''simple docstring''' snake_case__ = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__) @require_sentencepiece @require_tokenizers @require_tf class _lowerCAmelCase ( unittest.TestCase ): """simple docstring""" _lowercase : List[str] = [ ''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''', ''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. 
And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" 
''', ] _lowercase : str = [ '''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to''' ''' reduce the risk of wildfires.''', '''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''', ] # differs slightly from pytorch, likely due to numerical differences in linear layers _lowercase : int = '''google/pegasus-xsum''' @cached_property def __magic_name__ ( self : Dict): '''simple docstring''' return AutoTokenizer.from_pretrained(self.model_name) @cached_property def __magic_name__ ( self : int): '''simple docstring''' snake_case__ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name) return model def __magic_name__ ( self : Dict , **UpperCamelCase__ : List[Any]): '''simple docstring''' snake_case__ = self.translate_src_text(**UpperCamelCase__) assert self.expected_text == generated_words def __magic_name__ ( self : str , **UpperCamelCase__ : List[Any]): '''simple docstring''' snake_case__ = self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="""tf""") snake_case__ = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , ) snake_case__ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__) return generated_words @slow def __magic_name__ ( self : List[str]): '''simple docstring''' self._assert_generated_batch_equal_expected()
654
0
import argparse
import collections
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
# NOTE(review): the original chunk bound every module constant to `SCREAMING_SNAKE_CASE__`
# while reading them back as `TRANSFORMERS_PATH`, `_re_tf_models`, etc. — every name
# below is restored from its visible use site in this same file.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    # NOTE(review): "MODEL_FOR_FOR_VISION_2_SEQ..." looks like a typo for
    # MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES — hasattr() below silently skips unknown
    # mappings, so this would drop the image-to-text tag; confirm against modeling_auto.
    ("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]


def camel_case_split(identifier):
    """Split a CamelCase identifier into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table():
    """Build a DataFrame flagging, per model type, which frameworks (PT/TF/Flax) support it."""
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers object (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"
    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)


def update_pipeline_and_auto_class_table(table):
    """Update `table` ({model class: (pipeline tag, auto class)}) from the auto mappings."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table


def update_metadata(token, commit_sha):
    """Regenerate the frameworks/pipeline-tags tables and push them to the metadata dataset."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    """Raise if a supported pipeline task is missing from PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
205
"""Tokenization class for the Jukebox music-generation model.

Maps an (artist, genres, lyrics) triple to per-prior token-id sequences using
three separate JSON vocabularies (artists, genres, lyric characters).

NOTE(review): this block arrived with machine-mangled identifiers (duplicated
parameter names, every method named ``__magic_name__``, module constants all
bound to ``a__``). Names below are restored from the surviving use sites
(e.g. ``self.artists_encoder``, ``VOCAB_FILES_NAMES`` reads) — confirm against
the canonical upstream file.
"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import regex

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy


logger = logging.get_logger(__name__)

# File names the three vocabularies are saved under.
VOCAB_FILES_NAMES = {
    "artists_file": "artists.json",
    "lyrics_file": "lyrics.json",
    "genres_file": "genres.json",
}

# Remote locations of the pretrained vocabularies.
PRETRAINED_VOCAB_FILES_MAP = {
    "artists_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json",
    },
    "genres_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json",
    },
    "lyrics_file": {
        "jukebox": "https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json",
    },
}

# Maximum number of lyric tokens accepted per checkpoint.
PRETRAINED_LYRIC_TOKENS_SIZES = {
    "jukebox": 512,
}


class JukeboxTokenizer(PreTrainedTokenizer):
    """Tokenizer turning (artist, genres, lyrics) into id sequences for each Jukebox prior."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_lyric_input_size = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        artists_file,
        genres_file,
        lyrics_file,
        version=["v3", "v2", "v2"],  # mutable default kept for upstream compatibility
        max_n_lyric_tokens=512,
        n_genres=5,
        unk_token="<|endoftext|>",
        **kwargs,
    ):
        """Load the three JSON vocabularies and build the matching decoders.

        Args:
            artists_file: path to the artists JSON vocabulary.
            genres_file: path to the genres JSON vocabulary.
            lyrics_file: path to the lyric-character JSON vocabulary.
            version: model version per prior ("v2" or "v3").
            max_n_lyric_tokens: maximum number of lyric tokens kept.
            n_genres: number of genre slots (padded with -1).
            unk_token: token used for out-of-vocabulary entries.
        """
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token,
            n_genres=n_genres,
            version=version,
            max_n_lyric_tokens=max_n_lyric_tokens,
            **kwargs,
        )
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres
        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)
        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)
        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")
        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}

    @property
    def vocab_size(self):
        """Total size of the three vocabularies combined."""
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        # NOTE(review): `dict` with three positional mappings raises TypeError at
        # runtime; kept as found — confirm intended structure against upstream.
        return dict(self.artists_encoder, self.genres_encoder, self.lyrics_encoder)

    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        """Map artist names, genre lists and lyric characters to ids.

        Genres are padded with -1 up to ``self.n_genres``; unknown entries map to 0.
        """
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))
        # Only the first prior receives lyric tokens; the other two get empty lists.
        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        """Character-level tokenization of the lyrics string."""
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        """Normalize then tokenize the (artist, genre, lyrics) triple."""
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics

    def prepare_for_tokenization(self, artists: str, genres: str, lyrics: str, is_split_into_words: bool = False):
        """Lowercase/normalize names per model version and strip lyric characters
        outside the accepted vocabulary."""
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics

    def _run_strip_accents(self, text):
        """Strip accents (combining marks) from a piece of text."""
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text: str) -> str:
        """Normalize an artist/genre name: keep [a-zA-Z0-9.], collapse the rest to '_'."""
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text

    def convert_lyric_tokens_to_string(self, lyrics):
        """Join lyric tokens back into a single space-separated string."""
        return " ".join(lyrics)

    def convert_to_tensors(self, inputs, tensor_type: Optional[Union[str, TensorType]] = None, prepend_batch_axis: bool = False):
        """Convert ``inputs`` to the requested tensor framework (tf/pt/jax/np)."""
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed."
                )
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]
            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length."
            )
        return inputs

    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        """Tokenize one (artist, genres, lyrics) triple into a :class:`BatchEncoding`."""
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors
            )
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write the three vocabularies as JSON into ``save_directory``.

        Returns the (artists_file, genres_file, lyrics_file) paths.
        """
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"]
        )
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"]
        )
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"]
        )
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)

    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        """Inverse of :meth:`_convert_token_to_id` using the decoder maps."""
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
654
0
"""Release helper: bump the library version across the repo (init/setup/examples)
and clean the main README model list.

NOTE(review): identifiers in this block arrived mangled (all functions named
`_A`, duplicated `A__` parameters — a SyntaxError — and constants all bound to
`lowerCAmelCase__` while the code reads `REPLACE_PATTERNS`/`REPLACE_FILES`).
Names restored from the surviving use sites and call sites.
"""
import argparse
import os
import re

import packaging.version


# Root of the examples tree that also carries a pinned min-version check.
PATH_TO_EXAMPLES = "examples/"

# For each target kind: (regex locating the version line, replacement template).
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}

# Files rewritten by a global version update, keyed by pattern kind.
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """Rewrite the version in ``fname`` using the regex registered for ``pattern``."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the pinned min-version check in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version everywhere; examples are skipped for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace links to the `main` docs by stable-release links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read and parse the current version from the library ``__init__.py``."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Compute the release version (asking the user to confirm) and apply it."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Bump to the next dev version after a release (asking the user to confirm)."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
41
"""Image preprocessing utilities for the FRCNN-style visual backbone:
random shortest-edge resize, normalization, and batch padding.

NOTE(review): this block arrived with machine-mangled identifiers (both classes
named `_lowerCAmelCase` while `Preprocess.__init__` calls `ResizeShortestEdge`,
duplicate parameter names — a SyntaxError — and `np.uinta` for `np.uint8`).
Names restored from the surviving use sites.
"""
import sys
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import nn

from transformers.image_utils import PILImageResampling
from utils import img_tensorize


class ResizeShortestEdge:
    """Resize each image so its shortest edge matches a randomly sampled size,
    capping the longest edge at ``max_size``."""

    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """
        Args:
            short_edge_length: [min, max] range the target shortest edge is sampled from.
            max_size: maximum allowed length of the longest edge.
        """
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            # Shrink further if the long edge exceeds the cap.
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                # uint8 arrays go through PIL for resizing.
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs


class Preprocess:
    """Full preprocessing pipeline: tensorize, resize, normalize and pad a batch."""

    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        """Zero-pad all images to the per-batch maximum H and W.

        Returns the stacked batch tensor and a tensor of the original (h, w) sizes.
        """
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)

    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx


def _scale_box(boxes, scale_yx):
    """Rescale xyxy boxes in place by the per-image (y, x) scale factors."""
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size: Tuple[int, int]):
    """Clamp xyxy boxes in place to lie inside an image of size (h, w)."""
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
654
0
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class a__ : def __init__( self , _a , _a=sys.maxsize ): lowercase : Optional[int] = "bilinear" lowercase : Optional[int] = max_size lowercase : List[str] = short_edge_length def __call__( self , _a ): lowercase : Union[str, Any] = [] for img in imgs: lowercase , lowercase : Tuple = img.shape[:2] # later: provide list and randomly choose index for resize lowercase : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img lowercase : List[Any] = size * 1.0 / min(UpperCamelCase__ , UpperCamelCase__ ) if h < w: lowercase , lowercase : Union[str, Any] = size, scale * w else: lowercase , lowercase : str = scale * h, size if max(UpperCamelCase__ , UpperCamelCase__ ) > self.max_size: lowercase : Dict = self.max_size * 1.0 / max(UpperCamelCase__ , UpperCamelCase__ ) lowercase : Dict = newh * scale lowercase : Any = neww * scale lowercase : List[str] = int(neww + 0.5 ) lowercase : Tuple = int(newh + 0.5 ) if img.dtype == np.uinta: lowercase : Any = Image.fromarray(UpperCamelCase__ ) lowercase : str = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) lowercase : Optional[int] = np.asarray(UpperCamelCase__ ) else: lowercase : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw lowercase : int = nn.functional.interpolate( UpperCamelCase__ , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase__ ).squeeze(0 ) img_augs.append(UpperCamelCase__ ) return img_augs class a__ : def __init__( self , _a ): lowercase : int = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) lowercase : Optional[int] = cfg.INPUT.FORMAT lowercase : Union[str, Any] = cfg.SIZE_DIVISIBILITY lowercase : str = cfg.PAD_VALUE lowercase : str = 
cfg.INPUT.MAX_SIZE_TEST lowercase : str = cfg.MODEL.DEVICE lowercase : Any = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowercase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) lowercase : int = lambda _a : (x - self.pixel_mean) / self.pixel_std def __magic_name__ ( self , _a ): lowercase : str = tuple(max(UpperCamelCase__ ) for s in zip(*[img.shape for img in images] ) ) lowercase : Any = [im.shape[-2:] for im in images] lowercase : Tuple = [ nn.functional.pad( UpperCamelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase__ , UpperCamelCase__ ) ] return torch.stack(UpperCamelCase__ ), torch.tensor(UpperCamelCase__ ) def __call__( self , _a , _a=False ): with torch.no_grad(): if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowercase : List[Any] = [images] if single_image: assert len(UpperCamelCase__ ) == 1 for i in range(len(UpperCamelCase__ ) ): if isinstance(images[i] , torch.Tensor ): images.insert(UpperCamelCase__ , images.pop(UpperCamelCase__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase__ , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge lowercase : List[str] = torch.tensor([im.shape[:2] for im in images] ) lowercase : List[str] = self.aug(UpperCamelCase__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic lowercase : Union[str, Any] = [self.normalizer(UpperCamelCase__ ) for x in images] # now pad them to do the following operations lowercase , lowercase : int = self.pad(UpperCamelCase__ ) # Normalize if self.size_divisibility > 0: raise 
NotImplementedError() # pad lowercase : Tuple = torch.true_divide(UpperCamelCase__ , UpperCamelCase__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def __magic_name__ ( __snake_case : Optional[Any] , __snake_case : Any ) -> Optional[Any]: boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def __magic_name__ ( __snake_case : Any , __snake_case : Tuple[int, int] ) -> List[Any]: assert torch.isfinite(__snake_case ).all(), "Box tensor contains infinite or NaN!" lowercase , lowercase : Tuple = box_size tensor[:, 0].clamp_(min=0 , max=__snake_case ) tensor[:, 1].clamp_(min=0 , max=__snake_case ) tensor[:, 2].clamp_(min=0 , max=__snake_case ) tensor[:, 3].clamp_(min=0 , max=__snake_case )
361
"""WavLM model configuration.

NOTE(review): this block arrived with machine-mangled identifiers — every
`__init__` parameter named `UpperCamelCase__` (duplicate parameters are a
SyntaxError) and every `self.x = x` assignment rebinding a local instead of
setting the attribute. Names restored from the surviving use sites (e.g.
`self.conv_dim`/`self.conv_stride` reads, the imported `PretrainedConfig`).
"""
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
    # See all WavLM models at https://huggingface.co/models?filter=wavlm
}


class WavLMConfig(PretrainedConfig):
    """Configuration holding all hyper-parameters of a WavLM model
    (feature extractor, transformer encoder, SpecAugment, quantizer, CTC,
    adapter, and XVector heads)."""

    model_type = "wavlm"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        num_buckets=320,
        max_bucket_distance=800,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        """Total downsampling factor of the convolutional feature extractor."""
        return functools.reduce(operator.mul, self.conv_stride, 1)
654
0
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging

# FIX: the module logger was bound only to `lowerCAmelCase`, yet the class body
# calls `logger.warning(...)` — define `logger` and keep the old name as an alias.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
lowerCAmelCase: List[str] = logger  # legacy alias, kept for backward compatibility


class SCREAMING_SNAKE_CASE__(lowercase_):
    """Speech-to-image pipeline: transcribes audio with Whisper, then runs a
    Stable-Diffusion-style denoising loop conditioned on the transcription.

    FIX(review): the original declared every ``__init__``/``__call__`` parameter
    as the same name (a ``SyntaxError``) and referenced the undefined name
    ``UpperCamelCase__`` throughout; parameters now carry the names they are
    forwarded under.
    """

    def __init__(
        self,
        speech_model,       # WhisperForConditionalGeneration
        speech_processor,   # WhisperProcessor
        vae,                # AutoencoderKL
        text_encoder,       # CLIPTextModel
        tokenizer,          # CLIPTokenizer
        unet,               # UNetaDConditionModel
        scheduler,          # DDIM/PNDM/LMSDiscrete scheduler
        safety_checker,     # StableDiffusionSafetyChecker or None
        feature_extractor,  # CLIPImageProcessor
    ):
        '''Register all sub-models; warn when the safety checker is disabled.'''
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
                ' results in services or applications open to the public. Both the diffusers team and Hugging Face'
                ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
                ' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
                ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .'
            )

        # NOTE(review): safety_checker is accepted (for the warning above) but was
        # never registered in the original; that behavior is preserved here.
        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        """Enable sliced attention computation to save memory.

        FIX: this method originally shared the name ``UpperCAmelCase_`` with the
        disabler below, so it was shadowed and unreachable — and the disabler
        already calls ``self.enable_attention_slicing``, which did not exist.
        """
        if slice_size == "auto":
            # Halving the attention head dim is a reasonable memory/speed tradeoff.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def UpperCAmelCase_(self):
        '''Disable attention slicing (slice size ``None`` means full attention).'''
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16000,
        height=512,
        width=512,
        num_inference_steps=50,
        guidance_scale=7.5,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
        **kwargs,
    ):
        """Generate image(s) from an audio clip.

        The audio is transcribed with the Whisper speech model; the resulting
        text prompt then drives a classifier-free-guidance denoising loop.

        Returns a ``StableDiffusionPipelineOutput`` (or the raw image when
        ``return_dict=False``). Raises ``ValueError`` for a non-str/list prompt,
        non-multiple-of-8 ``height``/``width``, or invalid ``callback_steps``.
        """
        # --- speech -> text prompt -------------------------------------------
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors='pt', sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480000)

        prompt = self.speech_processor.tokenizer.batch_decode(
            predicted_ids, skip_special_tokens=True, normalize=True
        )[0]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(prompt)}''')

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''')

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                f''' {type(callback_steps)}.'''
            )

        # --- prompt text embeddings ------------------------------------------
        text_inputs = self.tokenizer(
            prompt,
            padding='max_length',
            max_length=self.tokenizer.model_max_length,
            return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f''' {self.tokenizer.model_max_length} tokens: {removed_text}'''
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # --- unconditional embeddings for classifier free guidance ------------
        if do_classifier_free_guidance:
            # FIX: removed the dead `uncond_tokens = 42` placeholder — every
            # branch below assigns the real value.
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(negative_prompt) is not type(prompt):
                raise TypeError(
                    f'''`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='''
                    f''' {type(prompt)}.'''
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'''`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'''
                    f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
                    ' the batch size of `prompt`.'
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding='max_length',
                max_length=max_length,
                truncation=True,
                return_tensors='pt',
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # --- initial latents ---------------------------------------------------
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device='cpu', dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''')
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs['eta'] = eta

        # --- denoising loop ----------------------------------------------------
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        # --- decode latents to images -----------------------------------------
        latents = 1 / 0.18_215 * latents  # VAE scaling factor used by Stable Diffusion
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
3
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class _lowerCAmelCase(lowercase_):
    """Unconditional image generation with the variance-exploding SDE
    (ScoreSdeVe) predictor-corrector sampler.

    FIX(review): the original declared both ``__init__`` parameters and all
    ``__call__`` parameters under one repeated name (``SyntaxError``), and
    annotated both class attributes as ``_lowercase``; names are restored from
    the keyword arguments the body forwards them under.
    """

    # registered sub-models
    unet: UNetaDModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: ScoreSdeVeScheduler):
        '''Register the score model and its VE-SDE scheduler.'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2_0_0_0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        """Sample ``batch_size`` images via predictor-corrector integration.

        Returns an ``ImagePipelineOutput`` (or a 1-tuple when
        ``return_dict=False``).
        """
        img_size = self.unet.config.sample_size
        # NOTE(review): channel count 3 is hard-coded here, not read from the
        # unet config — assumes an RGB score model.
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # initial noise, scaled to the scheduler's starting sigma
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            # per-step noise level, broadcast over the batch
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step: Langevin-style refinements at the current sigma
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step: move to the previous noise level
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        # the denoised mean of the final step is the returned image
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
654
0