code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
# This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests __lowerCamelCase : Any = open # noqa: we just need to have a builtin inside this module to test it properly
715
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase_ ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) a_ = Features({"text": Value("string" )} ) a_ = Features({"labels": ClassLabel} ) a_ = "text" a_ = "labels" def _lowercase ( self : Tuple , __A : List[Any] ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , __A ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) snake_case__ : Any = copy.deepcopy(self ) snake_case__ : Optional[Any] = self.label_schema.copy() snake_case__ : List[str] = features[self.label_column] snake_case__ : Dict = label_schema return task_template @property def _lowercase ( self : Tuple ): return { self.text_column: "text", self.label_column: "labels", }
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : list[int] ): snake_case__ : List[Any] = len(snake_case__ ) for i in range(snake_case__ ): for j in range(i + 1 , snake_case__ ): if numbers[j] < numbers[i]: snake_case__, snake_case__ : int = numbers[j], numbers[i] return numbers if __name__ == "__main__": __lowerCamelCase : List[Any] = input("""Enter numbers separated by a comma:\n""").strip() __lowerCamelCase : List[Any] = [int(item) for item in user_input.split(""",""")] print(exchange_sort(unsorted))
716
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Dict = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip_vision_model" def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ): super().__init__(**__A ) snake_case__ : List[str] = hidden_size snake_case__ : Optional[int] = intermediate_size snake_case__ : List[str] = num_hidden_layers snake_case__ : List[Any] = num_attention_heads snake_case__ : str = patch_size snake_case__ : int = image_size snake_case__ : int = initializer_range snake_case__ : Optional[int] = attention_dropout snake_case__ : str = layer_norm_eps snake_case__ : Optional[Any] = hidden_act snake_case__ : Tuple = qkv_bias @classmethod def _lowercase ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ): cls._set_token_in_kwargs(__A ) snake_case__, snake_case__ : str = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": snake_case__ : Union[str, Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip_qformer" def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ): super().__init__(pad_token_id=__A , **__A ) snake_case__ : Dict = vocab_size snake_case__ : Optional[int] = hidden_size snake_case__ : Optional[Any] = num_hidden_layers snake_case__ : str = num_attention_heads snake_case__ : int = hidden_act snake_case__ : Optional[Any] = intermediate_size snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : List[Any] = attention_probs_dropout_prob snake_case__ : List[Any] = max_position_embeddings snake_case__ : int = initializer_range snake_case__ : Dict = layer_norm_eps snake_case__ : str = position_embedding_type snake_case__ : Dict = cross_attention_frequency snake_case__ : List[str] = encoder_hidden_size @classmethod def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ): cls._set_token_in_kwargs(__A ) snake_case__, snake_case__ : Tuple = cls.get_config_dict(__A , **__A ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": snake_case__ : List[Any] = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip" a_ = True def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ): super().__init__(**__A ) if vision_config is None: snake_case__ : Any = {} logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." ) if qformer_config is None: snake_case__ : Optional[Any] = {} logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." ) if text_config is None: snake_case__ : Optional[int] = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." ) snake_case__ : List[Any] = InstructBlipVisionConfig(**__A ) snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A ) snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt" snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A ) snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings snake_case__ : Tuple = self.text_config.is_encoder_decoder snake_case__ : str = num_query_tokens snake_case__ : Dict = self.vision_config.hidden_size snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES snake_case__ : int = 1.0 snake_case__ : Optional[int] = 0.0_2 @classmethod def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , ) def _lowercase ( self : Optional[int] ): snake_case__ : Any = copy.deepcopy(self.__dict__ ) snake_case__ : Optional[Any] = self.vision_config.to_dict() snake_case__ : 
List[str] = self.qformer_config.to_dict() snake_case__ : List[Any] = self.text_config.to_dict() snake_case__ : List[Any] = self.__class__.model_type return output
25
0
from __future__ import annotations __lowerCamelCase : str = 8.9_88e9 # units = N * m^s * C^-2 def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Dict ): snake_case__ : Any = abs(chargea * chargea ) if (force, chargea, chargea, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if distance < 0: raise ValueError("Distance cannot be negative" ) if force == 0: snake_case__ : Any = COULOMBS_CONSTANT * charge_product / (distance**2) return {"force": force} elif chargea == 0: snake_case__ : List[str] = abs(snake_case_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge1": chargea} elif chargea == 0: snake_case__ : Union[str, Any] = abs(snake_case_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea) return {"charge2": chargea} elif distance == 0: snake_case__ : Any = (COULOMBS_CONSTANT * charge_product / abs(snake_case_ )) ** 0.5 return {"distance": distance} raise ValueError("Exactly one argument must be 0" ) if __name__ == "__main__": import doctest doctest.testmod()
717
def SCREAMING_SNAKE_CASE ( snake_case_ : list ): if len(snake_case_ ) <= 1: return lst snake_case__ : List[Any] = 1 while i < len(snake_case_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case__, snake_case__ : Tuple = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case__ : Union[str, Any] = 1 return lst if __name__ == "__main__": __lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip() __lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
25
0
import math from collections.abc import Callable def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Tuple , snake_case_ : str ): snake_case__ : float = xa snake_case__ : float = xa while True: if x_n == x_na or function(snake_case_ ) == function(snake_case_ ): raise ZeroDivisionError("float division by zero, could not find root" ) snake_case__ : float = x_na - ( function(snake_case_ ) / ((function(snake_case_ ) - function(snake_case_ )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na snake_case__ : int = x_na snake_case__ : Union[str, Any] = x_na def SCREAMING_SNAKE_CASE ( snake_case_ : Any ): return math.pow(snake_case_ , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
718
from __future__ import annotations import time __lowerCamelCase : str = list[tuple[int, int]] __lowerCamelCase : Optional[int] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Union[str, Any] , __A : int , __A : int , __A : int , __A : int , __A : Node | None ): snake_case__ : Optional[int] = pos_x snake_case__ : Dict = pos_y snake_case__ : int = (pos_y, pos_x) snake_case__ : Optional[int] = goal_x snake_case__ : Tuple = goal_y snake_case__ : str = parent class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : List[Any] , __A : tuple[int, int] , __A : tuple[int, int] ): snake_case__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , __A ) snake_case__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __A ) snake_case__ : int = [self.start] snake_case__ : Union[str, Any] = False def _lowercase ( self : Dict ): while self.node_queue: snake_case__ : Optional[Any] = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: snake_case__ : Optional[Any] = True return self.retrace_path(__A ) snake_case__ : int = self.get_successors(__A ) for node in successors: self.node_queue.append(__A ) if not self.reached: return [self.start.pos] return None def _lowercase ( self : Union[str, Any] , __A : Node ): snake_case__ : str = [] for action in delta: snake_case__ : str = parent.pos_x + action[1] snake_case__ : Union[str, Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(__A , __A , self.target.pos_y , self.target.pos_x , __A ) ) return successors def _lowercase ( self : Optional[Any] , __A : Node | 
None ): snake_case__ : Tuple = node snake_case__ : Any = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) snake_case__ : Tuple = current_node.parent path.reverse() return path class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Dict , __A : str , __A : int ): snake_case__ : str = BreadthFirstSearch(__A , __A ) snake_case__ : int = BreadthFirstSearch(__A , __A ) snake_case__ : Tuple = False def _lowercase ( self : Optional[Any] ): while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: snake_case__ : Any = self.fwd_bfs.node_queue.pop(0 ) snake_case__ : List[str] = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: snake_case__ : List[str] = True return self.retrace_bidirectional_path( __A , __A ) snake_case__ : Union[str, Any] = current_bwd_node snake_case__ : Dict = current_fwd_node snake_case__ : List[Any] = { self.fwd_bfs: self.fwd_bfs.get_successors(__A ), self.bwd_bfs: self.bwd_bfs.get_successors(__A ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(__A ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _lowercase ( self : Any , __A : Node , __A : Node ): snake_case__ : List[str] = self.fwd_bfs.retrace_path(__A ) snake_case__ : Optional[Any] = self.bwd_bfs.retrace_path(__A ) bwd_path.pop() bwd_path.reverse() snake_case__ : List[Any] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() __lowerCamelCase : str = (0, 0) __lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __lowerCamelCase : Any = time.time() __lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal) __lowerCamelCase : str = bfs.search() __lowerCamelCase : Optional[Any] = time.time() - start_bfs_time print("""Unidirectional BFS computation time : """, bfs_time) __lowerCamelCase : Optional[Any] = time.time() 
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal) __lowerCamelCase : str = bd_bfs.search() __lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time print("""Bidirectional BFS computation time : """, bd_bfs_time)
25
0
import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin __lowerCamelCase : str = get_tests_dir("""fixtures/test_sentencepiece.model""") @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase , unittest.TestCase ): """simple docstring""" a_ = XGLMTokenizer a_ = XGLMTokenizerFast a_ = True a_ = True def _lowercase ( self : str ): super().setUp() # We have a SentencePiece fixture for testing snake_case__ : Optional[int] = XGLMTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase ( self : Union[str, Any] ): snake_case__ : Tuple = "<pad>" snake_case__ : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[str] ): snake_case__ : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "<s>" ) self.assertEqual(vocab_keys[1] , "<pad>" ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1_0_0_8 ) def _lowercase ( self : Optional[int] ): self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_8 ) def _lowercase ( self : Tuple ): snake_case__ : Dict = XGLMTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ ) snake_case__ : Optional[Any] = tokenizer.tokenize("This is a test" ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , ) 
snake_case__ : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ] , ) snake_case__ : Tuple = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ value + tokenizer.fairseq_offset for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4] ] , ) snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) self.assertListEqual( SCREAMING_SNAKE_CASE_ , [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ] , ) @cached_property def _lowercase ( self : str ): return XGLMTokenizer.from_pretrained("facebook/xglm-564M" ) def _lowercase ( self : Optional[Any] ): with tempfile.NamedTemporaryFile() as f: shutil.copyfile(SCREAMING_SNAKE_CASE_ , f.name ) snake_case__ : Tuple = XGLMTokenizer(f.name , keep_accents=SCREAMING_SNAKE_CASE_ ) snake_case__ : List[str] = pickle.dumps(SCREAMING_SNAKE_CASE_ ) pickle.loads(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Tuple ): if not self.test_rust_tokenizer: return snake_case__ : Optional[Any] = self.get_tokenizer() snake_case__ : Dict = self.get_rust_tokenizer() snake_case__ : Dict = "I was born in 92000, and this is falsé." 
snake_case__ : int = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) snake_case__ : Any = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) snake_case__ : Optional[int] = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) snake_case__ : Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) snake_case__ : str = self.get_rust_tokenizer() snake_case__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE_ ) snake_case__ : Dict = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @slow def _lowercase ( self : Dict ): snake_case__ : str = "Hello World!" snake_case__ : Optional[int] = [2, 3_1_2_2_7, 4_4_4_7, 3_5] self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @slow def _lowercase ( self : List[str] ): snake_case__ : Tuple = ( "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . 
Also we will" " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth" ) # fmt: off snake_case__ : Union[str, Any] = [2, 1_0_1_8, 6_7, 1_1, 1_9_8_8, 2_6_1_7, 5_6_3_1, 2_7_8, 1_1, 3_4_0_7, 4_8, 7_1_6_3_0, 2_8_0_8_5, 4, 3_2_3_4, 1_5_7, 1_3, 6, 5, 6, 4, 3_5_2_6, 7_6_8, 1_5, 6_5_9, 5_7, 2_9_8, 3_9_8_3, 8_6_4, 1_2_9, 2_1, 6, 5, 1_3_6_7_5, 3_7_7, 6_5_2, 7_5_8_0, 1_0_3_4_1, 1_5_5, 2_8_1_7, 4_2_2, 1_6_6_6, 7, 1_6_7_4, 5_3, 1_1_3, 2_0_2_2_7_7, 1_7_8_9_2, 3_3, 6_0, 8_7, 4, 3_2_3_4, 1_5_7, 6_1, 2_6_6_7, 5_2_3_7_6, 1_9, 8_8, 2_3, 7_3_5] # fmt: on self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) ) @slow def _lowercase ( self : List[str] ): # fmt: off snake_case__ : List[Any] = { "input_ids": [[2, 1_0_8_8_2_5, 1_1_6_3, 1_5, 8_8_0_1_0, 4_7_3, 1_5_8_9_8, 1_5_7, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 2_3_8_0_2_1, 1_1_6_3, 5_3, 1_3_6_7_2, 1_8_5_7, 3_1_2, 8, 5_3_2_8_3, 1_8_2_3_9_6, 8, 1_8_5_6_6, 1_6, 3_6_7_3_3, 4_1_0_1, 8, 2_3_0, 2_4_4_0_1_7, 1_2_2_5_5_3, 7, 1_5, 1_3_2_5_9_7, 4, 2_9_3, 1_2_5_1_1, 7_6_1_0, 4, 3_4_1_4, 1_3_2_5_9_7, 9, 4, 3_2_3_6_1, 3_6_2, 4, 7_3_4, 2_8_5_1_2, 3_2_5_6_9, 1_8, 4, 3_2_3_6_1, 2_6_0_9_6, 1_4_9_8_2, 7_3, 1_8_7_1_5, 2_1_4_3_3, 2_3_5_2_6_1, 1_5, 4_9_2, 1_2_4_2_7, 1_6, 5_3, 1_8_7_1_5, 2_1_4_3_3, 6_5_4_5_4, 1_5, 2_3_6_5_9, 5_6_3, 1_6, 2_7_8, 5_9_7, 2_8_4_3, 5_9_5, 7_9_3_1, 1_8_2_3_9_6, 6_4_1_8_6, 2_2, 8_8_6, 5_9_5, 1_3_2_9_8_1, 5_3, 2_5_5_4_0, 3_4_4_9, 4_3_9_8_2, 3_9_9_0_1, 5_9_5_1, 8_7_8, 3_3_0, 4, 2_7_6_9_4, 8_0_2_6_9, 3_1_2, 5_3, 6_5_1_7, 1_1_7_8_0, 6_1_1, 2_0_4_0_8, 5], [2, 6, 1_3_2_5_9_7, 6_7, 4_2_8_9_7, 3_3, 5_9_2, 8, 1_6_3_7_2_9, 2_5_5_4_0, 3_6_1, 1_3_6_9_9_7, 1_0_9_5_1_4, 1_7_3_2_3_0, 7, 5_0_1, 6_0, 1_0_2_9_1_3, 1_9_6, 5_6_3_1, 2_3_5, 6_3_2_4_3, 4_7_3, 6, 2_3_1_7_5_7, 7_4, 5_2_7_7, 7_9_0_5, 5_3, 3_0_9_5, 3_7_3_1_7, 2_2, 4_5_4, 1_8_3_8_7_4, 5], [2, 2_6_8, 3_1_2_9_8, 4_6_5_3_0, 6, 1_3_2_9_3_5, 4_3_8_3_1, 7, 5_9_7, 3_2, 2_4, 3_6_8_8, 9_8_6_5, 5]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=SCREAMING_SNAKE_CASE_ , model_name="facebook/xglm-564M" , padding=SCREAMING_SNAKE_CASE_ , )
719
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Dict = parent snake_case__ : Optional[int] = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : str = min_resolution snake_case__ : Tuple = max_resolution snake_case__ : List[Any] = do_resize snake_case__ : Dict = size snake_case__ : List[str] = do_normalize snake_case__ : Optional[int] = image_mean snake_case__ : Optional[int] = image_std snake_case__ : Any = do_rescale snake_case__ : Optional[int] = rescale_factor snake_case__ : int = do_pad def _lowercase ( self : Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ): if not batched: snake_case__ : List[str] = image_inputs[0] if isinstance(__A , 
Image.Image ): snake_case__, snake_case__ : Tuple = image.size else: snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2] if w < h: snake_case__ : Dict = int(self.size["shortest_edge"] * h / w ) snake_case__ : Optional[int] = self.size["shortest_edge"] elif w > h: snake_case__ : List[Any] = self.size["shortest_edge"] snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Dict = self.size["shortest_edge"] snake_case__ : Dict = self.size["shortest_edge"] else: snake_case__ : str = [] for image in image_inputs: snake_case__, snake_case__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0] snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = ConditionalDetrImageProcessor if is_vision_available() else None def _lowercase ( self : int ): snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self ) @property def _lowercase ( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Any ): snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : List[str] ): snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Any = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , 
pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : Union[str, Any] ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : 
Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : List[Any] ): # prepare image and target snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Union[str, Any] = json.loads(f.read() ) snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) snake_case__ : int = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : str ): # prepare image, target and masks_path snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : int = json.loads(f.read() ) snake_case__ : Optional[int] = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" ) snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : str = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
import heapq import sys import numpy as np __lowerCamelCase : Optional[int] = tuple[int, int] class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Optional[Any] ): snake_case__ : Any = [] snake_case__ : int = set() def _lowercase ( self : Any ): if not self.empty(): return self.elements[0][0] else: return float("inf" ) def _lowercase ( self : List[Any] ): return len(self.elements ) == 0 def _lowercase ( self : Optional[int] , __A : str , __A : Dict ): if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(snake_case_ ) else: # update # print("update", item) snake_case__ : int = [] (snake_case__) : Dict = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) (snake_case__) : Any = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def _lowercase ( self : int , __A : Optional[int] ): if item in self.set: self.set.remove(snake_case_ ) snake_case__ : str = [] (snake_case__) : Optional[Any] = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) (snake_case__) : Optional[Any] = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def _lowercase ( self : Union[str, Any] ): return self.elements[0][1] def _lowercase ( self : Union[str, Any] ): (snake_case__) : int = heapq.heappop(self.elements ) self.set.remove(snake_case_ ) return (priority, item) def SCREAMING_SNAKE_CASE ( snake_case_ : TPos , snake_case_ : TPos ): snake_case__ : str = np.array(snake_case_ ) snake_case__ : Optional[int] = np.array(snake_case_ ) return np.linalg.norm(a - b ) def SCREAMING_SNAKE_CASE ( snake_case_ : TPos , snake_case_ : TPos ): return consistent_heuristic(snake_case_ , snake_case_ ) // t def SCREAMING_SNAKE_CASE ( snake_case_ : TPos , snake_case_ : TPos ): return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def SCREAMING_SNAKE_CASE ( snake_case_ : TPos , snake_case_ : int , 
snake_case_ : TPos , snake_case_ : dict[TPos, float] ): snake_case__ : List[str] = g_function[start] + Wa * heuristics[i](snake_case_ , snake_case_ ) return ans def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Tuple ): snake_case__ : List[str] = np.chararray((n, n) ) for i in range(snake_case_ ): for j in range(snake_case_ ): snake_case__ : List[Any] = "*" for i in range(snake_case_ ): for j in range(snake_case_ ): if (j, (n - 1) - i) in blocks: snake_case__ : List[Any] = "#" snake_case__ : List[Any] = "-" snake_case__ : Dict = back_pointer[goal] while x != start: (snake_case__) : List[Any] = x # print(x) snake_case__ : Optional[Any] = "-" snake_case__ : int = back_pointer[x] snake_case__ : Optional[Any] = "-" for i in range(snake_case_ ): for j in range(snake_case_ ): if (i, j) == (0, n - 1): print(grid[i][j] , end=" " ) print("<-- End position" , end=" " ) else: print(grid[i][j] , end=" " ) print() print("^" ) print("Start position" ) print() print("# is an obstacle" ) print("- is the path taken by algorithm" ) print("PATH TAKEN BY THE ALGORITHM IS:-" ) snake_case__ : List[str] = back_pointer[goal] while x != start: print(snake_case_ , end=" " ) snake_case__ : Tuple = back_pointer[x] print(snake_case_ ) sys.exit() def SCREAMING_SNAKE_CASE ( snake_case_ : TPos ): if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : List[str] , ): for itera in range(snake_case_ ): open_list[itera].remove_element(snake_case_ ) # print("s", s) # print("j", j) (snake_case__) : int = s snake_case__ : Union[str, Any] = (x - 1, y) snake_case__ : Optional[Any] = (x + 1, y) snake_case__ : List[Any] = (x, y + 1) snake_case__ : int = (x, y - 1) for neighbours in [left, right, up, down]: if 
neighbours not in blocks: if valid(snake_case_ ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(snake_case_ ) snake_case__ : Optional[int] = -1 snake_case__ : List[str] = float("inf" ) if valid(snake_case_ ) and g_function[neighbours] > g_function[s] + 1: snake_case__ : int = g_function[s] + 1 snake_case__ : str = s if neighbours not in close_list_anchor: open_list[0].put(snake_case_ , key(snake_case_ , 0 , snake_case_ , snake_case_ ) ) if neighbours not in close_list_inad: for var in range(1 , snake_case_ ): if key(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) <= Wa * key( snake_case_ , 0 , snake_case_ , snake_case_ ): open_list[j].put( snake_case_ , key(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ) def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list __lowerCamelCase : str = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __lowerCamelCase : Union[str, Any] = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __lowerCamelCase : int = make_common_ground() __lowerCamelCase : Tuple = blocks_blk # hyper parameters __lowerCamelCase : Any = 1 __lowerCamelCase : List[Any] = 1 __lowerCamelCase : List[Any] = 20 __lowerCamelCase : List[str] = 3 # one consistent and two other inconsistent # start and end destination __lowerCamelCase : List[Any] = (0, 0) __lowerCamelCase : int = (n - 1, n - 1) __lowerCamelCase : Any = 1 def SCREAMING_SNAKE_CASE ( snake_case_ : TPos , snake_case_ : TPos , snake_case_ : int ): 
snake_case__ : Tuple = {start: 0, goal: float("inf" )} snake_case__ : List[str] = {start: -1, goal: -1} snake_case__ : Any = [] snake_case__ : Union[str, Any] = set() for i in range(snake_case_ ): open_list.append(PriorityQueue() ) open_list[i].put(snake_case_ , key(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) ) snake_case__ : list[int] = [] snake_case__ : list[int] = [] while open_list[0].minkey() < float("inf" ): for i in range(1 , snake_case_ ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("inf" ): do_something(snake_case_ , snake_case_ , snake_case_ ) else: snake_case__ : Optional[Any] = open_list[i].top_show() visited.add(snake_case_ ) expand_state( snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) close_list_inad.append(snake_case_ ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("inf" ): do_something(snake_case_ , snake_case_ , snake_case_ ) else: snake_case__ : Any = open_list[0].top_show() visited.add(snake_case_ ) expand_state( snake_case_ , 0 , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ) close_list_anchor.append(snake_case_ ) print("No path found to goal" ) print() for i in range(n - 1 , -1 , -1 ): for j in range(snake_case_ ): if (j, i) in blocks: print("#" , end=" " ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print("*" , end=" " ) else: print("-" , end=" " ) else: print("*" , end=" " ) if (j, i) == (n - 1, n - 1): print("<-- End position" , end=" " ) print() print("^" ) print("Start position" ) print() print("# is an obstacle" ) print("- is the path taken by algorithm" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
720
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __lowerCamelCase : Optional[int] = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ __lowerCamelCase : str = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ __lowerCamelCase : str = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ): snake_case__ : List[Any] = compute_mauve( p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , ) return out
25
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu __lowerCamelCase : Union[str, Any] = False class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Dict ): super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _lowercase ( self : Tuple ): return 1_2 @property def _lowercase ( self : Optional[Any] ): return 1_2 @property def _lowercase ( self : List[Any] ): return 3_2 @property def _lowercase ( self : Any ): torch.manual_seed(0 ) snake_case__ : Tuple = VQModel( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def _lowercase ( self : List[Any] ): snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def _lowercase ( self : List[str] ): torch.manual_seed(0 ) snake_case__ : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModel(UpperCAmelCase__ ) @property def _lowercase ( self : Dict ): torch.manual_seed(0 ) snake_case__ : Tuple = 1_2 snake_case__ : Optional[Any] = 1_2 snake_case__ : Tuple = { '''attention_bias''': True, '''cross_attention_dim''': 3_2, '''attention_head_dim''': height * width, 
'''num_attention_heads''': 1, '''num_vector_embeds''': self.num_embed, '''num_embeds_ada_norm''': self.num_embeds_ada_norm, '''norm_num_groups''': 3_2, '''sample_size''': width, '''activation_fn''': '''geglu-approximate''', } snake_case__ : List[Any] = TransformeraDModel(**UpperCAmelCase__ ) return model def _lowercase ( self : Optional[Any] ): snake_case__ : int = '''cpu''' snake_case__ : List[str] = self.dummy_vqvae snake_case__ : Optional[Any] = self.dummy_text_encoder snake_case__ : List[str] = self.dummy_tokenizer snake_case__ : List[Any] = self.dummy_transformer snake_case__ : str = VQDiffusionScheduler(self.num_embed ) snake_case__ : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCAmelCase__ ) snake_case__ : str = VQDiffusionPipeline( vqvae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , transformer=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , learned_classifier_free_sampling_embeddings=UpperCAmelCase__ , ) snake_case__ : Dict = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) snake_case__ : Tuple = '''teddy bear playing in the pool''' snake_case__ : Any = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) snake_case__ : Dict = pipe([prompt] , generator=UpperCAmelCase__ , num_inference_steps=2 , output_type="np" ) snake_case__ : List[str] = output.images snake_case__ : int = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) snake_case__ : List[Any] = pipe( [prompt] , generator=UpperCAmelCase__ , output_type="np" , return_dict=UpperCAmelCase__ , num_inference_steps=2 )[0] snake_case__ : Optional[Any] = image[0, -3:, -3:, -1] snake_case__ : int = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) snake_case__ : str = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert 
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self : str ): snake_case__ : Dict = '''cpu''' snake_case__ : Any = self.dummy_vqvae snake_case__ : List[str] = self.dummy_text_encoder snake_case__ : int = self.dummy_tokenizer snake_case__ : str = self.dummy_transformer snake_case__ : Dict = VQDiffusionScheduler(self.num_embed ) snake_case__ : Dict = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCAmelCase__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) snake_case__ : Dict = VQDiffusionPipeline( vqvae=UpperCAmelCase__ , text_encoder=UpperCAmelCase__ , tokenizer=UpperCAmelCase__ , transformer=UpperCAmelCase__ , scheduler=UpperCAmelCase__ , learned_classifier_free_sampling_embeddings=UpperCAmelCase__ , ) snake_case__ : int = pipe.to(UpperCAmelCase__ ) pipe.set_progress_bar_config(disable=UpperCAmelCase__ ) snake_case__ : Optional[Any] = '''teddy bear playing in the pool''' snake_case__ : Optional[int] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) snake_case__ : Optional[int] = pipe([prompt] , generator=UpperCAmelCase__ , num_inference_steps=2 , output_type="np" ) snake_case__ : Any = output.images snake_case__ : Optional[int] = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) snake_case__ : Dict = pipe( [prompt] , generator=UpperCAmelCase__ , output_type="np" , return_dict=UpperCAmelCase__ , num_inference_steps=2 )[0] snake_case__ : Optional[Any] = image[0, -3:, -3:, -1] snake_case__ : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 2_4, 2_4, 3) snake_case__ : List[Any] = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase 
( self : str ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Any ): snake_case__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) snake_case__ : Optional[Any] = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" ) snake_case__ : int = pipeline.to(UpperCAmelCase__ ) pipeline.set_progress_bar_config(disable=UpperCAmelCase__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case__ : int = torch.Generator(device=UpperCAmelCase__ ).manual_seed(0 ) snake_case__ : Optional[int] = pipeline( "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=UpperCAmelCase__ , output_type="np" , ) snake_case__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) assert np.abs(expected_image - image ).max() < 2.0
721
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __lowerCamelCase : Union[str, Any] = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip 
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __lowerCamelCase : List[Any] = concatenate_datasets __lowerCamelCase : List[str] = DownloadConfig __lowerCamelCase : Union[str, Any] = DownloadManager __lowerCamelCase : str = DownloadMode __lowerCamelCase : Union[str, Any] = DownloadConfig __lowerCamelCase : List[str] = DownloadMode __lowerCamelCase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
25
0
import requests from bsa import BeautifulSoup def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple = "AAPL" ): snake_case__ : Union[str, Any] = F'''https://in.finance.yahoo.com/quote/{symbol}?s={symbol}''' snake_case__ : Union[str, Any] = BeautifulSoup(requests.get(_lowerCAmelCase ).text , "html.parser" ) snake_case__ : Tuple = "My(6px) Pos(r) smartphone_Mt(6px)" return soup.find("div" , class_=class_ ).find("span" ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
700
from __future__ import annotations def SCREAMING_SNAKE_CASE ( snake_case_ : int ): snake_case__ : str = [True] * limit snake_case__ : str = False snake_case__ : str = False snake_case__ : str = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): snake_case__ : Optional[Any] = i * 2 while index < limit: snake_case__ : Union[str, Any] = False snake_case__ : Any = index + i snake_case__ : Optional[Any] = [2] for i in range(3 , snake_case_ , 2 ): if is_prime[i]: primes.append(snake_case_ ) return primes def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ): snake_case__ : Optional[int] = prime_sieve(snake_case_ ) snake_case__ : List[Any] = 0 snake_case__ : List[str] = 0 for i in range(len(snake_case_ ) ): for j in range(i + length , len(snake_case_ ) ): snake_case__ : Dict = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: snake_case__ : Tuple = j - i snake_case__ : str = sol return largest if __name__ == "__main__": print(f"{solution() = }")
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 50 ): snake_case__ : Union[str, Any] = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(f"{solution() = }")
701
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Optional[Any] = parent snake_case__ : str = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : Optional[Any] = min_resolution snake_case__ : List[str] = max_resolution snake_case__ : Tuple = do_resize snake_case__ : str = size snake_case__ : str = do_normalize snake_case__ : Optional[Any] = image_mean snake_case__ : List[str] = image_std snake_case__ : List[str] = do_rescale snake_case__ : Tuple = rescale_factor snake_case__ : Tuple = do_pad def _lowercase ( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ): if not batched: snake_case__ : List[Any] = image_inputs[0] if isinstance(__A , Image.Image ): 
snake_case__, snake_case__ : str = image.size else: snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2] if w < h: snake_case__ : Any = int(self.size["shortest_edge"] * h / w ) snake_case__ : Any = self.size["shortest_edge"] elif w > h: snake_case__ : Optional[int] = self.size["shortest_edge"] snake_case__ : Any = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Tuple = self.size["shortest_edge"] snake_case__ : int = self.size["shortest_edge"] else: snake_case__ : Any = [] for image in image_inputs: snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0] snake_case__ : int = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = DeformableDetrImageProcessor if is_vision_available() else None def _lowercase ( self : str ): snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self ) @property def _lowercase ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Tuple ): snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "do_rescale" ) ) self.assertTrue(hasattr(__A , "do_pad" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : Any ): snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Tuple = 
self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : str ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : int ): # Initialize image_processing snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, 
expected_height, expected_width) , ) # Test batched snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Union[str, Any] ): # Initialize image_processing snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : Optional[Any] ): # prepare image and target snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Tuple = json.loads(f.read() ) snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : str = DeformableDetrImageProcessor() snake_case__ : Tuple = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : Any = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : Optional[int] ): # prepare image, target and masks_path snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : Any = json.loads(f.read() ) snake_case__ : Dict = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" ) snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Any = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : List[str] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : Union[str, Any] = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : Union[str, Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
from __future__ import annotations from decimal import Decimal from math import * # noqa: F403 from sympy import diff def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Optional[int] = 10**-10 ): snake_case__ : int = a while True: snake_case__ : List[Any] = Decimal(UpperCamelCase__ ) - ( Decimal(eval(UpperCamelCase__ ) ) / Decimal(eval(str(diff(UpperCamelCase__ ) ) ) ) # noqa: S307 ) # This number dictates the accuracy of the answer if abs(eval(UpperCamelCase__ ) ) < precision: # noqa: S307 return float(UpperCamelCase__ ) # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}") # Find Square Root of 5 print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}") # Exponential Roots print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
702
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __lowerCamelCase : Tuple = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __lowerCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) snake_case__ : Optional[int] = bs[:] snake_case__ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case_ ) cs.append(2**8 + n ) n += 1 snake_case__ : Dict = [chr(snake_case_ ) for n in cs] return dict(zip(snake_case_ , snake_case_ ) ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): snake_case__ : Dict = set() snake_case__ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case__ : List[Any] = char return pairs class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ): snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding="utf-8" ) as vocab_handle: snake_case__ : Any = json.load(__A ) snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()} snake_case__ : Union[str, Any] = errors # how to handle errors in decoding snake_case__ : Any = bytes_to_unicode() snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding="utf-8" ) as merges_handle: snake_case__ : str = merges_handle.read().split("\n" )[1:-1] snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges] snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) ) snake_case__ : Optional[int] = {} snake_case__ : Any = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _lowercase ( self : List[Any] ): return len(self.encoder ) def _lowercase ( self : Any ): return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self : Optional[Any] , __A : Optional[int] ): if token in self.cache: return self.cache[token] snake_case__ : Union[str, Any] = tuple(__A ) snake_case__ : List[Any] = get_pairs(__A ) if not pairs: return token while True: snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) ) if bigram not in self.bpe_ranks: break snake_case__, snake_case__ : Dict = bigram snake_case__ : str = [] snake_case__ : Union[str, Any] = 0 while i < len(__A ): try: snake_case__ : Dict = word.index(__A , __A ) except 
ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case__ : str = j if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case__ : str = tuple(__A ) snake_case__ : int = new_word if len(__A ) == 1: break else: snake_case__ : List[str] = get_pairs(__A ) snake_case__ : List[Any] = " ".join(__A ) snake_case__ : Optional[int] = word return word def _lowercase ( self : Optional[Any] , __A : Optional[Any] ): snake_case__ : List[str] = [] for token in re.findall(self.pat , __A ): snake_case__ : Dict = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) ) return bpe_tokens def _lowercase ( self : Union[str, Any] , __A : Optional[int] ): return self.encoder.get(__A , self.encoder.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , __A : Optional[Any] ): return self.decoder.get(__A ) def _lowercase ( self : Union[str, Any] , __A : Dict ): snake_case__ : Optional[Any] = "".join(__A ) snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ): if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : List[Any] = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case__ : str = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" ) snake_case__ : str = 0 with open(__A , "w" , encoding="utf-8" ) as 
writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) snake_case__ : int = token_index writer.write(" ".join(__A ) + "\n" ) index += 1 return vocab_file, merge_file def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : Tuple = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ): snake_case__ : Any = [self.sep_token_id] snake_case__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ): snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()): snake_case__ : Optional[int] = " " + text return (text, kwargs) def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ): 
snake_case__ : Optional[Any] = super()._pad( encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , ) # Load from model defaults if return_attention_mask is None: snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A ) if needs_to_be_padded: snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case__ : int = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": snake_case__ : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
25
0
from __future__ import annotations from statistics import mean def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : str ): snake_case__ : Any = [0] * no_of_processes snake_case__ : Optional[int] = [0] * no_of_processes # Initialize remaining_time to waiting_time. for i in range(__SCREAMING_SNAKE_CASE ): snake_case__ : str = burst_time[i] snake_case__ : list[int] = [] snake_case__ : Tuple = 0 snake_case__ : Optional[int] = 0 # When processes are not completed, # A process whose arrival time has passed \ # and has remaining execution time is put into the ready_process. # The shortest process in the ready_process, target_process is executed. while completed != no_of_processes: snake_case__ : Any = [] snake_case__ : Dict = -1 for i in range(__SCREAMING_SNAKE_CASE ): if (arrival_time[i] <= total_time) and (remaining_time[i] > 0): ready_process.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: snake_case__ : Dict = ready_process[0] for i in ready_process: if remaining_time[i] < remaining_time[target_process]: snake_case__ : Optional[int] = i total_time += burst_time[target_process] completed += 1 snake_case__ : Tuple = 0 snake_case__ : int = ( total_time - arrival_time[target_process] - burst_time[target_process] ) else: total_time += 1 return waiting_time def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : List[str] ): snake_case__ : Optional[int] = [0] * no_of_processes for i in range(__SCREAMING_SNAKE_CASE ): snake_case__ : Dict = burst_time[i] + waiting_time[i] return turn_around_time if __name__ == "__main__": print("""[TEST CASE 01]""") __lowerCamelCase : str = 4 __lowerCamelCase : Tuple = [2, 5, 3, 7] __lowerCamelCase : Union[str, Any] = [0, 0, 0, 0] __lowerCamelCase : Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes) __lowerCamelCase : List[Any] = calculate_turnaroundtime( burst_time, no_of_processes, waiting_time ) # Printing the Result 
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""") for i, process_id in enumerate(list(range(1, 5))): print( f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t" f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}" ) print(f"\nAverage waiting time = {mean(waiting_time):.5f}") print(f"Average turnaround time = {mean(turn_around_time):.5f}")
703
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def SCREAMING_SNAKE_CASE ( snake_case_ : str ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any ): from diffusers.utils.testing_utils import pytest_terminal_summary_main snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
25
0
import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL __lowerCamelCase : int = version.parse(version.parse(torch.__version__).base_version) < version.parse("""1.11""") def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Any , snake_case_ : str , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[int] , snake_case_ : List[str]=False , ): output_path.parent.mkdir(parents=snake_case_ , exist_ok=snake_case_ ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( snake_case_ , snake_case_ , f=output_path.as_posix() , input_names=snake_case_ , output_names=snake_case_ , dynamic_axes=snake_case_ , do_constant_folding=snake_case_ , use_external_data_format=snake_case_ , enable_onnx_checker=snake_case_ , opset_version=snake_case_ , ) else: export( snake_case_ , snake_case_ , f=output_path.as_posix() , input_names=snake_case_ , output_names=snake_case_ , dynamic_axes=snake_case_ , do_constant_folding=snake_case_ , opset_version=snake_case_ , ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : str , snake_case_ : List[str] , snake_case_ : Any = False ): snake_case__ : List[str] = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): snake_case__ : List[str] = "cuda" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA" ) else: snake_case__ : str = "cpu" snake_case__ : Any = Path(snake_case_ ) # VAE DECODER snake_case__ : Optional[int] = AutoencoderKL.from_pretrained(model_path + "/vae" ) snake_case__ : List[Any] = vae_decoder.config.latent_channels # forward only through the decoder part snake_case__ : int = vae_decoder.decode onnx_export( snake_case_ , model_args=( 
torch.randn(1 , snake_case_ , 25 , 25 ).to(device=snake_case_ , dtype=snake_case_ ), False, ) , output_path=output_path / "vae_decoder" / "model.onnx" , ordered_input_names=["latent_sample", "return_dict"] , output_names=["sample"] , dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, } , opset=snake_case_ , ) del vae_decoder if __name__ == "__main__": __lowerCamelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument( """--model_path""", type=str, required=True, help="""Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).""", ) parser.add_argument("""--output_path""", type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--opset""", default=14, type=int, help="""The version of the ONNX operator set to use.""", ) parser.add_argument("""--fp16""", action="""store_true""", default=False, help="""Export the models in `float16` mode""") __lowerCamelCase : List[Any] = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print("""SD: Done: ONNX""")
704
def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : Any = [0] * len(snake_case_ ) for i in range(1 , len(snake_case_ ) ): # use last results for better performance - dynamic programming snake_case__ : Union[str, Any] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: snake_case__ : str = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 snake_case__ : int = j return prefix_result def SCREAMING_SNAKE_CASE ( snake_case_ : str ): return max(prefix_function(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod()
25
0
from __future__ import annotations from random import random class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : int , __A : int | None = None ): snake_case__ : Union[str, Any] = value snake_case__ : Union[str, Any] = random() snake_case__ : Node | None = None snake_case__ : Node | None = None def __repr__( self : List[str] ): from pprint import pformat if self.left is None and self.right is None: return f'''\'{self.value}: {self.prior:.5}\'''' else: return pformat( {f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 ) def __str__( self : Any ): snake_case__ : Optional[int] = str(self.value ) + " " snake_case__ : Optional[Any] = str(self.left or "" ) snake_case__ : Union[str, Any] = str(self.right or "" ) return value + left + right def SCREAMING_SNAKE_CASE ( snake_case_ : Node | None , snake_case_ : int ): if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: snake_case__ : Dict = split(root.left , _lowerCamelCase ) return left, root else: snake_case__ : Any = split(root.right , _lowerCamelCase ) return root, right def SCREAMING_SNAKE_CASE ( snake_case_ : Node | None , snake_case_ : Node | None ): if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: snake_case__ : int = merge(left.right , _lowerCamelCase ) return left else: snake_case__ : List[Any] = merge(_lowerCamelCase , right.left ) return right def SCREAMING_SNAKE_CASE ( snake_case_ : Node | None , snake_case_ : int ): snake_case__ : Optional[Any] = Node(_lowerCamelCase ) snake_case__ : str = split(_lowerCamelCase , _lowerCamelCase ) return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase ) def SCREAMING_SNAKE_CASE ( snake_case_ : Node | None , snake_case_ : int ): snake_case__ : List[Any] = split(_lowerCamelCase , value - 1 ) snake_case__ : int = split(_lowerCamelCase , _lowerCamelCase ) 
return merge(_lowerCamelCase , _lowerCamelCase ) def SCREAMING_SNAKE_CASE ( snake_case_ : Node | None ): if not root: # None return else: inorder(root.left ) print(root.value , end="," ) inorder(root.right ) def SCREAMING_SNAKE_CASE ( snake_case_ : Node | None , snake_case_ : str ): for arg in args.split(): if arg[0] == "+": snake_case__ : Optional[int] = insert(_lowerCamelCase , int(arg[1:] ) ) elif arg[0] == "-": snake_case__ : Any = erase(_lowerCamelCase , int(arg[1:] ) ) else: print("Unknown command" ) return root def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = None print( "enter numbers to create a tree, + value to add value into treap, " "- value to erase all nodes with value. 'q' to quit. " ) snake_case__ : int = input() while args != "q": snake_case__ : Union[str, Any] = interact_treap(_lowerCamelCase , _lowerCamelCase ) print(_lowerCamelCase ) snake_case__ : Union[str, Any] = input() print("good by!" ) if __name__ == "__main__": import doctest doctest.testmod() main()
705
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

# lazily filled {str identifier -> jaxlib Device}; module-level because
# `jaxlib.xla_extension.Device` is not picklable, so we only ever store strings
# on the formatter itself
DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    """Formats Arrow data as jax arrays placed on a user-selected device."""

    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        """Build the {string id -> Device} table from the visible jax devices."""
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype jax arrays into one array;
        anything else is returned untouched."""
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        """Convert a leaf value to a jax array on the configured device.

        Strings/bytes/None pass through; character arrays become Python lists;
        ints/floats get an explicit dtype matching jax's x64 setting; PIL
        images are converted via numpy first.
        """
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        """Tensorize one node of a (possibly nested) structure."""
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:
                # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize an arbitrarily nested mapping/list structure."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a single row."""
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        """Extract, decode and tensorize one column, stacking it when possible."""
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        """Extract, decode and tensorize a whole batch, column by column."""
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
25
0
import argparse
import struct
import unittest


class SHA256:
    """Pure-Python SHA-256 (FIPS 180-4).

    The hex digest of ``data`` is available as ``self.hash`` after
    construction.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values (fractional parts of sqrt of first 8 primes)
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]

        # Initialize round constants (fractional parts of cbrt of first 64 primes)
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad to a multiple of 64 bytes: 0x80, zeros, then the 64-bit
        big-endian bit length of the original message."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over every 64-byte block and store
        the hex digest in ``self.hash``."""
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers for the extended message schedule
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # extend the first 16 words into the remaining 48
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        """Right-rotate a 32-bit ``value`` by ``rotations`` bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)


class SHA256HashTest(unittest.TestCase):
    """Cross-check the implementation against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())


def main() -> None:
    """Hash a CLI-supplied string or the contents of a file."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument(
        "-f", "--file", dest="input_file", help="Hash contents of a file"
    )
    args = parser.parse_args()

    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")

    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
706
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Lazy import structure: keys are submodule names, values are the public names
# they export. Framework-specific entries are added only when the framework is
# importable, so a missing backend never breaks `import transformers`.
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
25
0
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq state-dict key fragment -> HF module path ("*" = layer index)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# HF keys that live at the top of the model, not under the `wav2vec2.` prefix
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]


def read_txt_into_dict(filename):
    """Read a label file (one label per line) into {line_number: label}."""
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the HF model attribute addressed by ``key``.

    ``weight_type`` selects weight/weight_g/weight_v/bias/param (adapter
    params mapped via PARAM_MAPPING). Raises ValueError on shape mismatch.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Record ``value`` under the fully-qualified HF key in ``hf_dict``
    (dict-based variant of `set_recursively`)."""
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]


# fairseq adapter parameter names -> HF submodule paths
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}


def load_wav2vec2_layer(name, value, hf_model=None, hf_dict=None):
    """Map one fairseq tensor onto the HF model (or ``hf_dict``).

    Returns True when the tensor matched some MAPPING entry and was loaded.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                # recover the transformer layer index from the fairseq name
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every fairseq tensor into ``hf_model``; warn about leftovers."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor, validating shapes.

    type_id 0 = conv weight/bias; type_id 2 = layer norm (only layer 0 when
    group norm is used). Unmatched names are appended to ``unused_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Convert a fairseq wav2vec2 checkpoint into a HF model directory.

    Builds the right model head (CTC / pretraining / sequence classification),
    writes tokenizer/feature-extractor artifacts when fine-tuned, then copies
    the fairseq weights over.
    """
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path)
    else:
        config = Wav2Vec2Config()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config)
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1,
            sampling_rate=16000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
707
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Tuple ): snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) snake_case__ : Tuple = get_activation("gelu" ) self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) ) self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) ) def _lowercase ( self : Dict ): snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) snake_case__ : Union[str, Any] = get_activation("gelu" ) snake_case__ : int = get_activation("gelu_10" ) snake_case__ : Optional[int] = torch_builtin(__A ) snake_case__ : Dict = geluaa(__A ) snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(__A ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _lowercase ( self : str ): get_activation("gelu" ) get_activation("gelu_10" ) get_activation("gelu_fast" ) get_activation("gelu_new" ) get_activation("gelu_python" ) get_activation("gelu_pytorch_tanh" ) get_activation("linear" ) get_activation("mish" ) get_activation("quick_gelu" ) get_activation("relu" ) get_activation("sigmoid" ) get_activation("silu" ) get_activation("swish" ) get_activation("tanh" ) with self.assertRaises(__A ): get_activation("bogus" ) with self.assertRaises(__A ): get_activation(__A ) def _lowercase ( self : List[str] ): snake_case__ : List[str] = get_activation("gelu" ) snake_case__ : Any = 1 snake_case__ : Union[str, Any] = get_activation("gelu" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__A ): snake_case__ : int = acta.a
25
0
from __future__ import annotations def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int ): if partitions <= 0: raise ValueError("partitions must be a positive number!" ) if partitions > number_of_bytes: raise ValueError("partitions can not > number_of_bytes!" ) snake_case__ : Tuple = number_of_bytes // partitions snake_case__ : Any = [] for i in range(lowerCAmelCase__ ): snake_case__ : List[str] = i * bytes_per_partition + 1 snake_case__ : Optional[int] = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(F'''{start_bytes}-{end_bytes}''' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
708
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() __lowerCamelCase : int = logging.get_logger(__name__) __lowerCamelCase : int = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } __lowerCamelCase : Tuple = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ): for attribute in key.split("." 
): snake_case__ : int = getattr(snake_case_ , snake_case_ ) if weight_type is not None: snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape else: snake_case__ : List[str] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case__ : str = value elif weight_type == "weight_g": snake_case__ : Union[str, Any] = value elif weight_type == "weight_v": snake_case__ : Optional[Any] = value elif weight_type == "bias": snake_case__ : str = value else: snake_case__ : Union[str, Any] = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ): snake_case__ : str = [] snake_case__ : Optional[int] = fairseq_model.state_dict() snake_case__ : int = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): snake_case__ : Dict = False if "conv_layers" in name: load_conv_layer( snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , ) snake_case__ : str = True else: for key, mapped_key in MAPPING.items(): snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key): # special case since naming is very similar continue snake_case__ : int = True if "*" in mapped_key: snake_case__ : Any = name.split(snake_case_ )[0].split("." 
)[-2] snake_case__ : Any = mapped_key.replace("*" , snake_case_ ) if "weight_g" in name: snake_case__ : List[Any] = "weight_g" elif "weight_v" in name: snake_case__ : Optional[Any] = "weight_v" elif "bias" in name: snake_case__ : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case__ : Optional[Any] = "weight" else: snake_case__ : Optional[Any] = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ): snake_case__ : Tuple = full_name.split("conv_layers." )[-1] snake_case__ : Union[str, Any] = name.split("." ) snake_case__ : str = int(items[0] ) snake_case__ : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case__ : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case__ : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' 
{feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case__ : Optional[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case__ : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ): if config_path is not None: snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: snake_case__ : Tuple = UniSpeechSatConfig() snake_case__ : str = "" if is_finetuned: snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ ) else: snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ ) snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) snake_case__ : Tuple = model[0].eval() recursively_load_weights(snake_case_ , snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": __lowerCamelCase : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to 
convert""") parser.add_argument( """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __lowerCamelCase : List[Any] = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
25
0
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration __lowerCamelCase : Any = """facebook/wmt19-en-de""" __lowerCamelCase : Optional[Any] = FSMTTokenizer.from_pretrained(mname) # get the correct vocab sizes, etc. from the master model __lowerCamelCase : Optional[Any] = FSMTConfig.from_pretrained(mname) config.update( dict( d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) ) __lowerCamelCase : List[str] = FSMTForConditionalGeneration(config) print(f"num of params {tiny_model.num_parameters()}") # Test __lowerCamelCase : Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""") __lowerCamelCase : Union[str, Any] = tiny_model(**batch) print("""test output:""", len(outputs.logits[0])) # Save __lowerCamelCase : Dict = """tiny-wmt19-en-de""" tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(f"Generated {mname_tiny}") # Upload # transformers-cli upload tiny-wmt19-en-de
709
import copy import tempfile import unittest from transformers import MaMaaaConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ): if attention_mask is None: snake_case__ : Any = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ ) if decoder_head_mask is None: snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ ) if cross_attn_head_mask is None: snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, 
Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ): snake_case__ : Optional[Any] = parent snake_case__ : List[str] = batch_size snake_case__ : Union[str, Any] = seq_length snake_case__ : Optional[Any] = is_training snake_case__ : List[str] = use_labels snake_case__ : Tuple = vocab_size snake_case__ : Optional[Any] = hidden_size snake_case__ : Union[str, Any] = num_hidden_layers snake_case__ : List[Any] = num_attention_heads snake_case__ : Tuple = intermediate_size snake_case__ : str = hidden_act snake_case__ : Optional[Any] = hidden_dropout_prob snake_case__ : int = attention_probs_dropout_prob snake_case__ : int = encoder_layerdrop snake_case__ : Tuple = decoder_layerdrop snake_case__ : List[str] = max_position_embeddings snake_case__ : Tuple = eos_token_id snake_case__ : Dict = pad_token_id snake_case__ : str = bos_token_id def _lowercase ( self : Tuple ): snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for M2M100 the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 ) snake_case__ : Optional[Any] = 
decoder_input_ids.clamp(self.pad_token_id + 1 ) snake_case__ : Union[str, Any] = self.get_config() snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A ) return config, inputs_dict def _lowercase ( self : Dict ): return MaMaaaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , ) def _lowercase ( self : List[str] ): snake_case__, snake_case__ : Any = self.prepare_config_and_inputs() return config, inputs_dict def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ): snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval() snake_case__ : List[Any] = inputs_dict["input_ids"] snake_case__ : Optional[Any] = inputs_dict["attention_mask"] snake_case__ : Union[str, Any] = inputs_dict["head_mask"] # first forward pass snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A ) snake_case__, snake_case__ : Dict = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) snake_case__ : Tuple = model(__A , attention_mask=__A 
)["last_hidden_state"] snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[ "last_hidden_state" ] # select random slice snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) ) def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ): snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval() snake_case__ : Union[str, Any] = model(**__A ) snake_case__ : Tuple = outputs.encoder_last_hidden_state snake_case__ : Union[str, Any] = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ : Dict = model.get_encoder() encoder.save_pretrained(__A ) snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A ) snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case__ : Dict = model.get_decoder() decoder.save_pretrained(__A ) snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A ) snake_case__ : List[str] = decoder( input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0] self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 ) @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = ( ( MaMaaaModel, MaMaaaForConditionalGeneration, 
) if is_torch_available() else () ) a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else () a_ = ( { "conversational": MaMaaaForConditionalGeneration, "feature-extraction": MaMaaaModel, "summarization": MaMaaaForConditionalGeneration, "text2text-generation": MaMaaaForConditionalGeneration, "translation": MaMaaaForConditionalGeneration, } if is_torch_available() else {} ) a_ = True a_ = True a_ = False a_ = False def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ): if pipeline_test_casse_name == "TranslationPipelineTests": # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`. # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer. return True return False def _lowercase ( self : Tuple ): snake_case__ : Any = MaMaaaModelTester(self ) snake_case__ : Dict = ConfigTester(self , config_class=__A ) def _lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def _lowercase ( self : Union[str, Any] ): snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: snake_case__ : int = model_class(__A ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__A ) snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A ) self.assertEqual(info["missing_keys"] , [] ) def _lowercase ( self : Dict ): snake_case__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A ) def _lowercase ( self : Any ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*__A ) def _lowercase ( self : Union[str, Any] ): snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in (MaMaaaModel, 
MaMaaaForConditionalGeneration): snake_case__ : str = model_class(__A ) model.to(__A ) model.eval() snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) ) if not self.is_encoder_decoder: snake_case__ : Optional[Any] = inputs["input_ids"] del inputs["input_ids"] else: snake_case__ : Union[str, Any] = inputs["input_ids"] snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A ) del inputs["input_ids"] inputs.pop("decoder_input_ids" , __A ) snake_case__ : Tuple = model.get_input_embeddings() if not self.is_encoder_decoder: snake_case__ : List[Any] = wte(__A ) else: snake_case__ : Any = wte(__A ) snake_case__ : Optional[int] = wte(__A ) with torch.no_grad(): model(**__A )[0] def _lowercase ( self : Optional[Any] ): snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() snake_case__ : Any = input_dict["input_ids"] snake_case__ : int = input_ids.ne(1 ).to(__A ) snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A ) if torch_device == "cuda": model.half() model.generate(__A , attention_mask=__A ) model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 ) def SCREAMING_SNAKE_CASE ( snake_case_ : int ): return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ ) __lowerCamelCase : Optional[Any] = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : str ): return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" ) def _lowercase ( self : Optional[int] ): snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A ) snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) 
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A ) with torch.no_grad(): snake_case__ : str = model(**__A )[0] snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) ) self.assertEqual(output.shape , __A ) # change to expected output here snake_case__ : Optional[Any] = torch.tensor( [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) ) def _lowercase ( self : Union[str, Any] ): snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A ) # change to intended input snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] ) snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] ) snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A ) with torch.no_grad(): snake_case__ : Union[str, Any] = model(**__A )[0] snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) ) self.assertEqual(output.shape , __A ) # change to expected output here snake_case__ : List[str] = torch.tensor( [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A ) self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) ) def _lowercase ( self : Optional[Any] ): snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A ) snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" ) snake_case__ : List[Any] = [ "L'affaire NSA souligne l'absence totale de débat sur le renseignement", "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.", "Lorsque François Hollande téléphone à Barack Obama ou quand 
le ministre des affaires étrangères Laurent" " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de" " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.", ] # The below article tests that we don't add any hypotheses outside of the top n_beams snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" ) snake_case__ : Tuple = model.generate( input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , ) snake_case__ : List[str] = [ "The NSA case highlights the total absence of intelligence debate", "I think there are two levels of response from the French government.", "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S." " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all" " communications in France.", ] snake_case__ : Dict = tokenizer.batch_decode( hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A ) assert generated == expected_en
25
0
import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE ( ): assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE ( ): snake_case__ : str = "mock-s3-bucket" snake_case__ : List[str] = F'''s3://{mock_bucket}''' snake_case__ : Dict = extract_path_from_uri(snake_case_ ) assert dataset_path.startswith("s3://" ) is False snake_case__ : Optional[Any] = "./local/path" snake_case__ : Tuple = extract_path_from_uri(snake_case_ ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ): snake_case__ : Union[str, Any] = is_remote_filesystem(snake_case_ ) assert is_remote is True snake_case__ : Optional[int] = fsspec.filesystem("file" ) snake_case__ : List[str] = is_remote_filesystem(snake_case_ ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Dict , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Any ): snake_case__ : str = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} snake_case__ : str = input_paths[compression_fs_class.protocol] if input_path is None: snake_case__ : Union[str, Any] = F'''for \'{compression_fs_class.protocol}\' compression protocol, ''' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case_ ) snake_case__ : int = 
fsspec.filesystem(compression_fs_class.protocol , fo=snake_case_ ) assert isinstance(snake_case_ , snake_case_ ) snake_case__ : Tuple = os.path.basename(snake_case_ ) snake_case__ : Dict = expected_filename[: expected_filename.rindex("." )] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case_ , "r" , encoding="utf-8" ) as f, open(snake_case_ , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : int ): snake_case__ : Optional[Any] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} snake_case__ : Tuple = compressed_file_paths[protocol] snake_case__ : Optional[int] = "dataset.jsonl" snake_case__ : Dict = F'''{protocol}://{member_file_path}::{compressed_file_path}''' snake_case__, *snake_case__ : List[Any] = fsspec.get_fs_token_paths(snake_case_ ) assert fs.isfile(snake_case_ ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : Optional[int] , snake_case_ : int ): snake_case__ : str = hf_api.dataset_info(snake_case_ , token=snake_case_ ) snake_case__ : Tuple = HfFileSystem(repo_info=snake_case_ , token=snake_case_ ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case_ ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE ( ): snake_case__ : Any = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case_ , snake_case_ , clobber=snake_case_ ) with pytest.warns(snake_case_ ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case_ ) == 1 assert ( str(warning_info[0].message ) == F'''A filesystem 
protocol was already set for {protocol} and will be overwritten.''' )
710
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect ``df``'s rows partition-by-partition in ``partition_order``.

    Returns a list of ``(row_id, row_dict)`` tuples where ``row_id`` is
    ``"<partition>_<index-within-partition>"`` — the id scheme the Spark
    examples iterable is expected to produce.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        # SPARK_PARTITION_ID() lets us pull out one physical partition at a time.
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    """A 100-row dataframe with 8-byte rows repartitions to 50 shards at max_shard_size=16."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes.
    # Setting a max_shard_size of 16 means that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    """_generate_iterable_examples honors an explicit (reversed) partition order."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]  # reverse the partitions
    generate_fn = _generate_iterable_examples(df, partition_order)
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    """A single-partition dataframe yields rows 0_0, 0_1, ... in order."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    """shuffle_data_sources reorders shards according to the provided generator."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    """shard_data_sources splits 4 partitions across 2 workers as [0, 2] and [1, 3]."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    """Repartitioning never produces more partitions than there are rows."""
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
25
0
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
    "facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
    # See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}


class XLMRobertaXLConfig(PretrainedConfig):
    """Configuration for an XLM-RoBERTa-XL model.

    Defaults reproduce the architecture of ``facebook/xlm-roberta-xl``.
    Token-id kwargs (``pad_token_id``/``bos_token_id``/``eos_token_id``) are
    forwarded to :class:`PretrainedConfig`; everything else is stored as an
    attribute of the same name.
    """

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
711
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: maps submodule name -> public names it exports.
# Optional backends only register their submodule when the dependency is present.
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlnet"] = [
        "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLNetForMultipleChoice",
        "XLNetForQuestionAnswering",
        "XLNetForQuestionAnsweringSimple",
        "XLNetForSequenceClassification",
        "XLNetForTokenClassification",
        "XLNetLMHeadModel",
        "XLNetModel",
        "XLNetPreTrainedModel",
        "load_tf_weights_in_xlnet",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlnet"] = [
        "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLNetForMultipleChoice",
        "TFXLNetForQuestionAnsweringSimple",
        "TFXLNetForSequenceClassification",
        "TFXLNetForTokenClassification",
        "TFXLNetLMHeadModel",
        "TFXLNetMainLayer",
        "TFXLNetModel",
        "TFXLNetPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Mirror the lazy structure above so static type checkers see the real names.
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
25
0
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the InstructPix2Pix pipeline with tiny dummy components."""

    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a minimal set of pipeline components (tiny unet/vae/text encoder)."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,  # 4 latent channels + 4 image-conditioning channels
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic call kwargs: a random 32x32 RGB image plus a fixed prompt."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            # MPS does not support device-local generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        # Convert the PIL image to a batched tensor in [0, 1] and duplicate it.
        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]
        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join(str(x) for x in rounded_slice))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_latents_input(self):
        """Passing pre-encoded latents must match passing the raw image."""
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")
        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]
        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")


@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    """Integration tests against the full ``timbrooks/instruct-pix2pix`` checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        """The callback must be invoked once per step with sane intermediate latents."""
        number_of_steps = 0

        def callback_fn(step, timestep, latents) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
712
# Guarded imports for the Kandinsky pipelines: if torch or transformers is
# missing, fall back to the dummy placeholder objects so importing this
# package never hard-fails on an optional dependency.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # NOTE(review): only KandinskyPipeline and KandinskyPriorPipeline get dummy
    # fallbacks here — KandinskyImgaImgPipeline / KandinskyInpaintPipeline /
    # MultilingualCLIP remain undefined on this path; confirm that is intended.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    # Real implementations: only imported when both torch and transformers exist.
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
25
0
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    """Fast unit tests for PriorTransformer using a tiny dummy configuration."""

    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        """Random (unseeded) inputs with batch=4, dim=8, 7 encoder embeddings."""
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        """Deterministic (seeded) inputs with the same shapes as ``dummy_input``."""
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        """Loading the dummy hub checkpoint reports no missing keys and runs forward."""
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    """Slow integration tests against the Kandinsky 2.1 prior checkpoint."""

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
713
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def SCREAMING_SNAKE_CASE ( snake_case_ : dict ): return (data["data"], data["target"]) def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ): snake_case__ : Optional[int] = XGBClassifier() classifier.fit(snake_case_ , snake_case_ ) return classifier def SCREAMING_SNAKE_CASE ( ): snake_case__ : Any = load_iris() snake_case__, snake_case__ : str = data_handling(snake_case_ ) snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split( snake_case_ , snake_case_ , test_size=0.25 ) snake_case__ : Dict = iris["target_names"] # Create an XGBoost Classifier from the training data snake_case__ : Dict = xgboost(snake_case_ , snake_case_ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , ) plt.title("Normalized Confusion Matrix - IRIS Dataset" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
25
0
import datasets from .evaluate import evaluate __lowerCamelCase : int = """\ @article{hendrycks2021cuad, title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review}, author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball}, journal={arXiv preprint arXiv:2103.06268}, year={2021} } """ __lowerCamelCase : List[str] = """ This metric wrap the official scoring script for version 1 of the Contract Understanding Atticus Dataset (CUAD). Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510 commercial legal contracts that have been manually labeled to identify 41 categories of important clauses that lawyers look for when reviewing contracts in connection with corporate transactions. """ __lowerCamelCase : int = """ Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall). Args: predictions: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair as given in the references (see below) - \'prediction_text\': list of possible texts for the answer, as a list of strings depending on a threshold on the confidence probability of each prediction. references: List of question-answers dictionaries with the following key-values: - \'id\': id of the question-answer pair (see above), - \'answers\': a Dict in the CUAD dataset format { \'text\': list of possible texts for the answer, as a list of strings \'answer_start\': list of start positions for the answer, as a list of ints } Note that answer_start values are not taken into account to compute the metric. 
Returns: \'exact_match\': Exact match (the normalized answer exactly match the gold answer) \'f1\': The F-score of predicted tokens versus the gold answer \'aupr\': Area Under the Precision-Recall curve \'prec_at_80_recall\': Precision at 80% recall \'prec_at_90_recall\': Precision at 90% recall Examples: >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}] >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}] >>> cuad_metric = datasets.load_metric(\"cuad\") >>> results = cuad_metric.compute(predictions=predictions, references=references) >>> print(results) {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : int ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": { "id": datasets.Value("string" ), "prediction_text": datasets.features.Sequence(datasets.Value("string" ) ), }, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , ) def _lowercase ( self : List[Any] , __A : Tuple , __A : str ): snake_case__ : Any = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} 
snake_case__ : Optional[int] = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] snake_case__ : Any = evaluate(dataset=__UpperCamelCase , predictions=__UpperCamelCase ) return score
714
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ): snake_case__ : Tuple = args.log_outputs snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric snake_case__ : List[str] = load_metric("wer" ) snake_case__ : List[str] = load_metric("cer" ) # compute metrics snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] ) snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}''' print(snake_case_ ) with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f: f.write(snake_case_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt''' snake_case__ : int = F'''log_{dataset_id}_targets.txt''' with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t: # mapping function to write output def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ): p.write(F'''{i}''' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(F'''{i}''' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case_ , with_indices=snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
snake_case__ : Optional[Any] = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) ) return text def SCREAMING_SNAKE_CASE ( snake_case_ : int ): # load dataset snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id ) snake_case__ : List[Any] = feature_extractor.sampling_rate # resample audio snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) ) # load eval pipeline if args.device is None: snake_case__ : int = 0 if torch.cuda.is_available() else -1 snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case_ : Any ): snake_case__ : Union[str, Any] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) snake_case__ : Optional[int] = prediction["text"] snake_case__ : Optional[Any] = normalize_text(batch["sentence"] ) return batch # run inference on all examples snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case_ , snake_case_ ) if __name__ == "__main__": __lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) __lowerCamelCase : str = parser.parse_args() main(args)
25
0
import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : int ): snake_case__ : str = StableDiffusionPipeline.from_pretrained(_UpperCamelCase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors snake_case__ : int = load_file(_UpperCamelCase ) snake_case__ : List[Any] = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: snake_case__ : int = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" ) snake_case__ : Any = pipeline.text_encoder else: snake_case__ : Union[str, Any] = key.split("." 
)[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" ) snake_case__ : Optional[Any] = pipeline.unet # find the target layer snake_case__ : int = layer_infos.pop(0 ) while len(_UpperCamelCase ) > -1: try: snake_case__ : List[Any] = curr_layer.__getattr__(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: snake_case__ : Dict = layer_infos.pop(0 ) elif len(_UpperCamelCase ) == 0: break except Exception: if len(_UpperCamelCase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: snake_case__ : Union[str, Any] = layer_infos.pop(0 ) snake_case__ : str = [] if "lora_down" in key: pair_keys.append(key.replace("lora_down" , "lora_up" ) ) pair_keys.append(_UpperCamelCase ) else: pair_keys.append(_UpperCamelCase ) pair_keys.append(key.replace("lora_up" , "lora_down" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: snake_case__ : List[Any] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) snake_case__ : Union[str, Any] = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_UpperCamelCase , _UpperCamelCase ).unsqueeze(2 ).unsqueeze(3 ) else: snake_case__ : List[str] = state_dict[pair_keys[0]].to(torch.floataa ) snake_case__ : str = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_UpperCamelCase , _UpperCamelCase ) # update visited list for item in pair_keys: visited.append(_UpperCamelCase ) return pipeline if __name__ == "__main__": __lowerCamelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", 
type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""") __lowerCamelCase : int = parser.parse_args() __lowerCamelCase : List[Any] = args.base_model_path __lowerCamelCase : Any = args.checkpoint_path __lowerCamelCase : str = args.dump_path __lowerCamelCase : Optional[Any] = args.lora_prefix_unet __lowerCamelCase : Tuple = args.lora_prefix_text_encoder __lowerCamelCase : Dict = args.alpha __lowerCamelCase : Dict = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) __lowerCamelCase : str = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
715
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase_ ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) a_ = Features({"text": Value("string" )} ) a_ = Features({"labels": ClassLabel} ) a_ = "text" a_ = "labels" def _lowercase ( self : Tuple , __A : List[Any] ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , __A ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) snake_case__ : Any = copy.deepcopy(self ) snake_case__ : Optional[Any] = self.label_schema.copy() snake_case__ : List[str] = features[self.label_column] snake_case__ : Dict = label_schema return task_template @property def _lowercase ( self : Tuple ): return { self.text_column: "text", self.label_column: "labels", }
25
0
import argparse import json import os from pathlib import Path import requests import torch from transformers import JukeboxConfig, JukeboxModel from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : List[Any] = logging.get_logger(__name__) __lowerCamelCase : List[str] = """https://openaipublic.azureedge.net/jukebox/models/""" __lowerCamelCase : Optional[int] = { """jukebox-1b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """1b_lyrics/prior_level_2.pth.tar""", ], """jukebox-5b-lyrics""": [ """5b/vqvae.pth.tar""", """5b/prior_level_0.pth.tar""", """5b/prior_level_1.pth.tar""", """5b_lyrics/prior_level_2.pth.tar""", ], } def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] ): if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10: snake_case__ : Optional[int] = key.replace(".model.1.bias" , ".conv1d_1.bias" ) elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10: snake_case__ : str = key.replace(".model.1.weight" , ".conv1d_1.weight" ) elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10: snake_case__ : List[Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" ) elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10: snake_case__ : int = key.replace(".model.3.weight" , ".conv1d_2.weight" ) if "conditioner_blocks.0." in key: snake_case__ : List[Any] = key.replace("conditioner_blocks.0" , "conditioner_blocks" ) if "prime_prior" in key: snake_case__ : Dict = key.replace("prime_prior" , "encoder" ) if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key: snake_case__ : Any = key.replace(".emb." , "." ) if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook return key.replace(".k" , ".codebook" ) if "y_emb." in key: return key.replace("y_emb." , "metadata_embedding." ) if "x_emb.emb." 
in key: snake_case__ : Union[str, Any] = key.replace("0.x_emb.emb" , "embed_tokens" ) if "prime_state_ln" in key: return key.replace("prime_state_ln" , "encoder.final_layer_norm" ) if ".ln" in key: return key.replace(".ln" , ".layer_norm" ) if "_ln" in key: return key.replace("_ln" , "_layer_norm" ) if "prime_state_proj" in key: return key.replace("prime_state_proj" , "encoder.proj_in" ) if "prime_x_out" in key: return key.replace("prime_x_out" , "encoder.lm_head" ) if "prior.x_out" in key: return key.replace("x_out" , "fc_proj_out" ) if "x_emb" in key: return key.replace("x_emb" , "embed_tokens" ) return key def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Tuple ): snake_case__ : Any = {} import re snake_case__ : Tuple = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) snake_case__ : Tuple = re.compile( R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) snake_case__ : Optional[int] = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) snake_case__ : Dict = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" ) snake_case__ : Dict = re.compile( R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) snake_case__ : str = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" ) snake_case__ : Optional[int] = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" ) snake_case__ : List[Any] = re.compile( R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" ) snake_case__ : str = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" ) for original_key, value in state_dict.items(): # rename vqvae.encoder keys if re_encoder_block_conv_in.fullmatch(snake_case_ ): snake_case__ : Tuple = re_encoder_block_conv_in.match(snake_case_ ) snake_case__ : Tuple = 
regex_match.groups() snake_case__ : List[str] = int(groups[2] ) * 2 + int(groups[3] ) snake_case__ : Any = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}''' snake_case__ : Tuple = re_encoder_block_conv_in.sub(snake_case_ , snake_case_ ) elif re_encoder_block_resnet.fullmatch(snake_case_ ): snake_case__ : Any = re_encoder_block_resnet.match(snake_case_ ) snake_case__ : Optional[Any] = regex_match.groups() snake_case__ : List[str] = int(groups[2] ) * 2 + int(groups[3] ) snake_case__ : Optional[int] = {"1": 1, "3": 2}[groups[-2]] snake_case__ : Tuple = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.''' snake_case__ : int = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' snake_case__ : Any = prefix + resnet_block snake_case__ : int = re_encoder_block_resnet.sub(snake_case_ , snake_case_ ) elif re_encoder_block_proj_out.fullmatch(snake_case_ ): snake_case__ : Optional[int] = re_encoder_block_proj_out.match(snake_case_ ) snake_case__ : Union[str, Any] = regex_match.groups() snake_case__ : Dict = F'''encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}''' snake_case__ : str = re_encoder_block_proj_out.sub(snake_case_ , snake_case_ ) # rename vqvae.decoder keys elif re_decoder_block_conv_out.fullmatch(snake_case_ ): snake_case__ : Optional[int] = re_decoder_block_conv_out.match(snake_case_ ) snake_case__ : int = regex_match.groups() snake_case__ : List[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2 snake_case__ : Dict = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}''' snake_case__ : Optional[int] = re_decoder_block_conv_out.sub(snake_case_ , snake_case_ ) elif re_decoder_block_resnet.fullmatch(snake_case_ ): snake_case__ : List[Any] = re_decoder_block_resnet.match(snake_case_ ) snake_case__ : Any = regex_match.groups() snake_case__ : int = int(groups[2] ) * 2 + int(groups[3] ) - 2 snake_case__ : int = {"1": 1, 
"3": 2}[groups[-2]] snake_case__ : Optional[int] = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.''' snake_case__ : Optional[int] = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' snake_case__ : Optional[Any] = prefix + resnet_block snake_case__ : Dict = re_decoder_block_resnet.sub(snake_case_ , snake_case_ ) elif re_decoder_block_proj_in.fullmatch(snake_case_ ): snake_case__ : Optional[Any] = re_decoder_block_proj_in.match(snake_case_ ) snake_case__ : List[Any] = regex_match.groups() snake_case__ : str = F'''decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}''' snake_case__ : List[Any] = re_decoder_block_proj_in.sub(snake_case_ , snake_case_ ) # rename prior cond.model to upsampler.upsample_block and resnet elif re_prior_cond_conv_out.fullmatch(snake_case_ ): snake_case__ : Optional[int] = re_prior_cond_conv_out.match(snake_case_ ) snake_case__ : int = regex_match.groups() snake_case__ : str = int(groups[1] ) * 2 + int(groups[2] ) - 2 snake_case__ : Optional[int] = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}''' snake_case__ : List[Any] = re_prior_cond_conv_out.sub(snake_case_ , snake_case_ ) elif re_prior_cond_resnet.fullmatch(snake_case_ ): snake_case__ : Union[str, Any] = re_prior_cond_resnet.match(snake_case_ ) snake_case__ : int = regex_match.groups() snake_case__ : Tuple = int(groups[1] ) * 2 + int(groups[2] ) - 2 snake_case__ : str = {"1": 1, "3": 2}[groups[-2]] snake_case__ : int = F'''conditioner_blocks.upsampler.upsample_block.{block_index}.''' snake_case__ : Dict = F'''resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}''' snake_case__ : Dict = prefix + resnet_block snake_case__ : Optional[int] = re_prior_cond_resnet.sub(snake_case_ , snake_case_ ) elif re_prior_cond_proj_in.fullmatch(snake_case_ ): snake_case__ : int = re_prior_cond_proj_in.match(snake_case_ ) snake_case__ : Dict = regex_match.groups() snake_case__ : int = 
F'''conditioner_blocks.upsampler.proj_in.{groups[-1]}''' snake_case__ : str = re_prior_cond_proj_in.sub(snake_case_ , snake_case_ ) # keep original key else: snake_case__ : List[str] = original_key snake_case__ : Any = replace_key(snake_case_ ) if F'''{key_prefix}.{key}''' not in model_state_dict or key is None: print(F'''failed converting {original_key} to {key}, does not match''' ) # handle missmatched shape elif value.shape != model_state_dict[F'''{key_prefix}.{key}'''].shape: snake_case__ : Dict = model_state_dict[F'''{key_prefix}.{key}'''] print(F'''{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match''' ) snake_case__ : int = original_key snake_case__ : List[str] = original_key snake_case__ : Tuple = value return new_dict @torch.no_grad() def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any]=None , snake_case_ : Any=None ): for file in MODEL_MAPPING[model_name]: if not os.path.isfile(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' ): snake_case__ : Tuple = requests.get(F'''{PREFIX}{file}''' , allow_redirects=snake_case_ ) os.makedirs(F'''{pytorch_dump_folder_path}/''' , exist_ok=snake_case_ ) open(F'''{pytorch_dump_folder_path}/{file.split('/' )[-1]}''' , "wb" ).write(r.content ) snake_case__ : Union[str, Any] = MODEL_MAPPING[model_name.split("/" )[-1]] snake_case__ : List[Any] = JukeboxConfig.from_pretrained(snake_case_ ) snake_case__ : Tuple = JukeboxModel(snake_case_ ) snake_case__ : Any = [] snake_case__ : Union[str, Any] = {} for i, dict_name in enumerate(snake_case_ ): snake_case__ : Tuple = torch.load(F'''{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}''' )["model"] snake_case__ : Union[str, Any] = {} for k in old_dic.keys(): if k.endswith(".b" ): snake_case__ : Union[str, Any] = old_dic[k] elif k.endswith(".w" ): snake_case__ : Dict = old_dic[k] elif "level_2" not in dict_name and "cond.model." 
in k: snake_case__ : str = old_dic[k] else: snake_case__ : List[Any] = old_dic[k] snake_case__ : Optional[int] = "vqvae" if i == 0 else F'''priors.{3 - i}''' snake_case__ : List[Any] = fix_jukebox_keys(snake_case_ , model.state_dict() , snake_case_ , snake_case_ ) weight_dict.append(snake_case_ ) snake_case__ : Optional[int] = weight_dict.pop(0 ) model.vqvae.load_state_dict(snake_case_ ) for i in range(len(snake_case_ ) ): model.priors[i].load_state_dict(weight_dict[2 - i] ) Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) with open(F'''{pytorch_dump_folder_path}/mapping.json''' , "w" ) as txtfile: json.dump(snake_case_ , snake_case_ ) print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) return weight_dict if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""jukebox-5b-lyrics""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""jukebox-5b-lyrics-converted""", type=str, help="""Path to the output PyTorch model directory.""", ) __lowerCamelCase : Any = parser.parse_args() convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
716
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Dict = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip_vision_model" def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ): super().__init__(**__A ) snake_case__ : List[str] = hidden_size snake_case__ : Optional[int] = intermediate_size snake_case__ : List[str] = num_hidden_layers snake_case__ : List[Any] = num_attention_heads snake_case__ : str = patch_size snake_case__ : int = image_size snake_case__ : int = initializer_range snake_case__ : Optional[int] = attention_dropout snake_case__ : str = layer_norm_eps snake_case__ : Optional[Any] = hidden_act snake_case__ : Tuple = qkv_bias @classmethod def _lowercase ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ): cls._set_token_in_kwargs(__A ) snake_case__, snake_case__ : str = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": snake_case__ : Union[str, Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip_qformer" def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ): super().__init__(pad_token_id=__A , **__A ) snake_case__ : Dict = vocab_size snake_case__ : Optional[int] = hidden_size snake_case__ : Optional[Any] = num_hidden_layers snake_case__ : str = num_attention_heads snake_case__ : int = hidden_act snake_case__ : Optional[Any] = intermediate_size snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : List[Any] = attention_probs_dropout_prob snake_case__ : List[Any] = max_position_embeddings snake_case__ : int = initializer_range snake_case__ : Dict = layer_norm_eps snake_case__ : str = position_embedding_type snake_case__ : Dict = cross_attention_frequency snake_case__ : List[str] = encoder_hidden_size @classmethod def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ): cls._set_token_in_kwargs(__A ) snake_case__, snake_case__ : Tuple = cls.get_config_dict(__A , **__A ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": snake_case__ : List[Any] = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip" a_ = True def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ): super().__init__(**__A ) if vision_config is None: snake_case__ : Any = {} logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." ) if qformer_config is None: snake_case__ : Optional[Any] = {} logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." ) if text_config is None: snake_case__ : Optional[int] = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." ) snake_case__ : List[Any] = InstructBlipVisionConfig(**__A ) snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A ) snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt" snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A ) snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings snake_case__ : Tuple = self.text_config.is_encoder_decoder snake_case__ : str = num_query_tokens snake_case__ : Dict = self.vision_config.hidden_size snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES snake_case__ : int = 1.0 snake_case__ : Optional[int] = 0.0_2 @classmethod def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , ) def _lowercase ( self : Optional[int] ): snake_case__ : Any = copy.deepcopy(self.__dict__ ) snake_case__ : Optional[Any] = self.vision_config.to_dict() snake_case__ : 
List[str] = self.qformer_config.to_dict() snake_case__ : List[Any] = self.text_config.to_dict() snake_case__ : List[Any] = self.__class__.model_type return output
25
0
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A `logging.LoggerAdapter` that is aware of distributed processes.

    By default a record is emitted only on the main process; pass
    ``main_process_only=False`` to log on every process, and ``in_order=True``
    to have the processes log one after another, in process order.
    """

    @staticmethod
    def _should_log(main_process_only):
        # Log when logging on every process was requested, or when this is the
        # main process of the accelerate `PartialState`.
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to `Logger.log` after deciding whether this process may log.

        Accepts two extra keyword arguments:
        - ``main_process_only`` (default ``True``): only log on the main process.
        - ``in_order`` (default ``False``): let each process log in turn.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    # Keep the processes in lock-step so output stays ordered.
                    state.wait_for_everyone()


# Backward-compatible alias for the original (mangled) class name.
SCREAMING_SNAKE_CASE__ = MultiProcessAdapter


def SCREAMING_SNAKE_CASE(name: str, log_level: str = None):
    """Return a `MultiProcessAdapter` wrapping ``logging.getLogger(name)``.

    If ``log_level`` is not given, the ``ACCELERATE_LOG_LEVEL`` environment
    variable is consulted; when a level is found it is applied to both the
    named logger and the root logger.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
717
def SCREAMING_SNAKE_CASE(lst: list) -> list:
    """Sort ``lst`` in place into ascending order with gnome sort and return it.

    Gnome sort walks the list, swapping adjacent out-of-order elements and
    stepping back after each swap; worst-case running time is O(n^2).
    """
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # Pair is out of order: swap it and step back to re-check.
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(SCREAMING_SNAKE_CASE(unsorted))
25
0
def SCREAMING_SNAKE_CASE(num: int) -> bool:
    """Return ``True`` if ``num`` is a non-negative palindromic integer.

    Builds the digit-reversed value arithmetically (no string conversion)
    and compares it with the original. Negative numbers are never
    palindromes because of the sign.
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        # Shift the accumulated reversal left and append the lowest digit.
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
718
from __future__ import annotations

import time

# A path is a list of (row, col) grid coordinates.
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    """A search-tree node: grid position, goal position, and parent link."""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        # `pos` is stored (row, col), i.e. (y, x).
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Plain breadth-first search over the module-level ``grid``."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        # start/goal are (row, col); Node takes (x, y) first, hence the swap.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Run BFS; return the shortest path, or ``[start]`` if unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, obstacle-free neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back to the root and return the path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Two BFS frontiers expanded alternately from start and goal."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        """Expand both frontiers until they pop the same cell.

        NOTE(review): as in the original, both queues are popped each
        iteration while the loop condition only requires one to be non-empty;
        this is fine for the bundled grid but assumes neither queue empties
        first — TODO confirm for other grids.
        """
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # Each frontier chases the other's most recent node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the forward path with the reversed backward path."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        # Drop the duplicated meeting cell before reversing the backward leg.
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()

    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
25
0
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME

# Checkpoint sizes published for DialoGPT.
DIALOGPT_MODELS = ["small", "medium", "large"]
# The fine-tuned pickles store the LM head under a different key than the
# `transformers` GPT-2 implementation expects.
OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    """Rename the LM-head weight key of a DialoGPT checkpoint and re-save it.

    Loads the state dict at ``checkpoint_path``, moves the tensor stored under
    ``lm_head.decoder.weight`` to ``lm_head.weight``, and writes the result to
    ``pytorch_dump_folder_path/WEIGHTS_NAME`` (creating the folder if needed).
    """
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
719
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Dict = parent snake_case__ : Optional[int] = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : str = min_resolution snake_case__ : Tuple = max_resolution snake_case__ : List[Any] = do_resize snake_case__ : Dict = size snake_case__ : List[str] = do_normalize snake_case__ : Optional[int] = image_mean snake_case__ : Optional[int] = image_std snake_case__ : Any = do_rescale snake_case__ : Optional[int] = rescale_factor snake_case__ : int = do_pad def _lowercase ( self : Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ): if not batched: snake_case__ : List[str] = image_inputs[0] if isinstance(__A , 
Image.Image ): snake_case__, snake_case__ : Tuple = image.size else: snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2] if w < h: snake_case__ : Dict = int(self.size["shortest_edge"] * h / w ) snake_case__ : Optional[int] = self.size["shortest_edge"] elif w > h: snake_case__ : List[Any] = self.size["shortest_edge"] snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Dict = self.size["shortest_edge"] snake_case__ : Dict = self.size["shortest_edge"] else: snake_case__ : str = [] for image in image_inputs: snake_case__, snake_case__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0] snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = ConditionalDetrImageProcessor if is_vision_available() else None def _lowercase ( self : int ): snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self ) @property def _lowercase ( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Any ): snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : List[str] ): snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Any = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , 
pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : Union[str, Any] ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : 
Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : List[Any] ): # prepare image and target snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Union[str, Any] = json.loads(f.read() ) snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) snake_case__ : int = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : str ): # prepare image, target and masks_path snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : int = json.loads(f.read() ) snake_case__ : Optional[int] = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" ) snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : str = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
import datasets from .evaluate import evaluate __lowerCamelCase : Union[str, Any] = """\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n""" __lowerCamelCase : List[str] = """\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n""" __lowerCamelCase : Tuple = """\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the SQuAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{\'prediction_text\': \'1976\', \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> references = [{\'answers\': {\'answer_start\': [97], \'text\': [\'1976\']}, \'id\': \'56e10a3be3433e1400422b22\'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = 
squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0}\n""" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Union[str, Any] ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": {"id": datasets.Value("string" ), "prediction_text": datasets.Value("string" )}, "references": { "id": datasets.Value("string" ), "answers": datasets.features.Sequence( { "text": datasets.Value("string" ), "answer_start": datasets.Value("int32" ), } ), }, } ) , codebase_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , reference_urls=["https://rajpurkar.github.io/SQuAD-explorer/"] , ) def _lowercase ( self : List[str] , __A : List[Any] , __A : Dict ): snake_case__ : int = {prediction["id"]: prediction["prediction_text"] for prediction in predictions} snake_case__ : Dict = [ { "paragraphs": [ { "qas": [ { "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]], "id": ref["id"], } for ref in references ] } ] } ] snake_case__ : List[str] = evaluate(dataset=__A , predictions=__A ) return score
720
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __lowerCamelCase : Optional[int] = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ __lowerCamelCase : str = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ __lowerCamelCase : str = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ): snake_case__ : List[Any] = compute_mauve( p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , ) return out
25
0
def SCREAMING_SNAKE_CASE() -> str:
    """Project Euler 48: last ten digits of 1**1 + 2**2 + ... + 1000**1000.

    Returns the answer as a string of the final ten decimal digits.
    """
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE())
721
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __lowerCamelCase : Union[str, Any] = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip 
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __lowerCamelCase : List[Any] = concatenate_datasets __lowerCamelCase : List[str] = DownloadConfig __lowerCamelCase : Union[str, Any] = DownloadManager __lowerCamelCase : str = DownloadMode __lowerCamelCase : Union[str, Any] = DownloadConfig __lowerCamelCase : List[str] = DownloadMode __lowerCamelCase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
25
0
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir

# Make the shared test utilities (test_module.*) importable from this test file.
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))

from test_module.custom_configuration import CustomConfig  # noqa E402

# Path to a minimal local config fixture used by the local-loading tests.
__lowerCamelCase : Union[str, Any] = get_tests_dir("""fixtures/dummy-config.json""")


class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Tests for the AutoConfig factory (local files, the Hub, registration).

    NOTE(review): identifiers here are machine-mangled — every local is bound as
    `snake_case__` and most arguments appear as `__A`, so several statements
    reference names (`config`, `reloaded_config`, the expected classes) that are
    never bound as written. Treat the original upstream file as authoritative.
    """

    def _lowercase ( self : Optional[int] ):
        # setUp placeholder (presumably `setUp` originally — TODO confirm).
        snake_case__ : str = 0

    def _lowercase ( self : int ):
        # The auto module must be importable and discoverable via importlib.
        self.assertIsNotNone(transformers.models.auto.__spec__ )
        self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )

    def _lowercase ( self : List[Any] ):
        # Loading a well-known Hub checkpoint should resolve to its config class.
        snake_case__ : str = AutoConfig.from_pretrained("bert-base-uncased" )
        self.assertIsInstance(__A , __A )

    def _lowercase ( self : Union[str, Any] ):
        # Loading from a local config file path.
        snake_case__ : Optional[int] = AutoConfig.from_pretrained(__A )
        self.assertIsInstance(__A , __A )

    def _lowercase ( self : List[Any] ):
        # Loading from an identifier (presumably DUMMY_UNKNOWN_IDENTIFIER — TODO confirm).
        snake_case__ : Any = AutoConfig.from_pretrained(__A )
        self.assertIsInstance(__A , __A )

    def _lowercase ( self : List[Any] ):
        # for_model builds a default config from a model-type string.
        snake_case__ : Tuple = AutoConfig.for_model("roberta" )
        self.assertIsInstance(__A , __A )

    def _lowercase ( self : Tuple ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            snake_case__ : Any = os.path.join(__A , "fake-roberta" )
            os.makedirs(__A , exist_ok=__A )
            with open(os.path.join(__A , "config.json" ) , "w" ) as f:
                f.write(json.dumps({} ) )
            snake_case__ : List[Any] = AutoConfig.from_pretrained(__A )
            self.assertEqual(type(__A ) , __A )

    def _lowercase ( self : List[str] ):
        try:
            AutoConfig.register("custom" , __A )
            # Wrong model type will raise an error
            with self.assertRaises(__A ):
                AutoConfig.register("model" , __A )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(__A ):
                AutoConfig.register("bert" , __A )

            # Now that the config is registered, it can be used as any other config with the auto-API
            snake_case__ : str = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(__A )
                snake_case__ : List[str] = AutoConfig.from_pretrained(__A )
            self.assertIsInstance(__A , __A )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]

    def _lowercase ( self : Union[str, Any] ):
        # A bad repo id must produce a clear error message.
        with self.assertRaisesRegex(
            __A , "bert-base is not a local folder and is not a valid model identifier" ):
            snake_case__ : Any = AutoConfig.from_pretrained("bert-base" )

    def _lowercase ( self : int ):
        # A bad revision must produce a clear error message.
        with self.assertRaisesRegex(
            __A , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            snake_case__ : Optional[Any] = AutoConfig.from_pretrained(__A , revision="aaaaaa" )

    def _lowercase ( self : Any ):
        # A repo without a config.json must produce a clear error message.
        with self.assertRaisesRegex(
            __A , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
            snake_case__ : Any = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )

    def _lowercase ( self : List[str] ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(__A ):
            snake_case__ : Dict = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(__A ):
            snake_case__ : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )

        snake_case__ : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
        self.assertEqual(config.__class__.__name__ , "NewModelConfig" )

        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(__A )
            snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
        self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )

    def _lowercase ( self : Any ):
        # A locally registered config must win over remote code unless
        # trust_remote_code explicitly enables the Hub version.
        class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
            """Local stand-in config registered under the "new-model" type."""

            a_ = "new-model"

        try:
            AutoConfig.register("new-model" , __A )
            # If remote code is not set, the default is to use local
            snake_case__ : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
            self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
            # If remote code is disabled, we load the local one.
            snake_case__ : Dict = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
            self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
            # If remote is enabled, we load from the Hub
            snake_case__ : Dict = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
            self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
        finally:
            # Clean up the registration regardless of test outcome.
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
700
from __future__ import annotations def SCREAMING_SNAKE_CASE ( snake_case_ : int ): snake_case__ : str = [True] * limit snake_case__ : str = False snake_case__ : str = False snake_case__ : str = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): snake_case__ : Optional[Any] = i * 2 while index < limit: snake_case__ : Union[str, Any] = False snake_case__ : Any = index + i snake_case__ : Optional[Any] = [2] for i in range(3 , snake_case_ , 2 ): if is_prime[i]: primes.append(snake_case_ ) return primes def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ): snake_case__ : Optional[int] = prime_sieve(snake_case_ ) snake_case__ : List[Any] = 0 snake_case__ : List[str] = 0 for i in range(len(snake_case_ ) ): for j in range(i + length , len(snake_case_ ) ): snake_case__ : Dict = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: snake_case__ : Tuple = j - i snake_case__ : str = sol return largest if __name__ == "__main__": print(f"{solution() = }")
25
0
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ConditionalDetrImageProcessor


class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Helper that builds ConditionalDetr image-processor kwargs and computes
    the resize shapes the processor is expected to produce.

    NOTE(review): identifiers are machine-mangled — locals are all bound as
    `snake_case__` and parameters as `__A`, while the bodies read names such as
    `size`, `parent`, `w`, `h` that are never bound as written. The original
    upstream file is authoritative for the real names.
    """

    def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
        snake_case__ : Dict = parent
        snake_case__ : Optional[int] = batch_size
        snake_case__ : Union[str, Any] = num_channels
        snake_case__ : str = min_resolution
        snake_case__ : Tuple = max_resolution
        snake_case__ : List[Any] = do_resize
        snake_case__ : Dict = size
        snake_case__ : List[str] = do_normalize
        snake_case__ : Optional[int] = image_mean
        snake_case__ : Optional[int] = image_std
        snake_case__ : Any = do_rescale
        snake_case__ : Optional[int] = rescale_factor
        snake_case__ : int = do_pad

    def _lowercase ( self : Dict ):
        # kwargs dict forwarded to the image-processor constructor in tests.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ):
        # Compute the (height, width) the processor's shortest-edge resize
        # should yield; for batched inputs, take the per-axis maximum.
        if not batched:
            snake_case__ : List[str] = image_inputs[0]
            if isinstance(__A , Image.Image ):
                snake_case__ : Tuple = image.size
            else:
                snake_case__ : List[str] = image.shape[1], image.shape[2]
            if w < h:
                snake_case__ : Dict = int(self.size["shortest_edge"] * h / w )
                snake_case__ : Optional[int] = self.size["shortest_edge"]
            elif w > h:
                snake_case__ : List[Any] = self.size["shortest_edge"]
                snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
            else:
                snake_case__ : Dict = self.size["shortest_edge"]
                snake_case__ : Dict = self.size["shortest_edge"]
        else:
            snake_case__ : str = []
            for image in image_inputs:
                snake_case__ : str = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0]
            snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
    """ConditionalDetr image-processor tests for PIL, numpy and torch inputs,
    plus slow COCO detection / panoptic fixture checks.

    NOTE(review): `UpperCamelCase_` is a mangled base-class reference — the
    import list suggests ImageProcessingSavingTestMixin; confirm upstream.
    """

    a_ = ConditionalDetrImageProcessor if is_vision_available() else None

    def _lowercase ( self : int ):
        snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )

    @property
    def _lowercase ( self : Any ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowercase ( self : Any ):
        # The processor must expose all configured preprocessing attributes.
        snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__A , "image_mean" ) )
        self.assertTrue(hasattr(__A , "image_std" ) )
        self.assertTrue(hasattr(__A , "do_normalize" ) )
        self.assertTrue(hasattr(__A , "do_resize" ) )
        self.assertTrue(hasattr(__A , "size" ) )

    def _lowercase ( self : List[str] ):
        # from_dict must honour both the defaults and legacy size/max_size kwargs.
        snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , __A )
        snake_case__ : Any = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
        self.assertEqual(image_processor.do_pad , __A )

    def _lowercase ( self : Union[str, Any] ):
        # Intentionally skipped case (kept for interface parity with the mixin).
        pass

    def _lowercase ( self : List[str] ):
        # Initialize image_processing
        snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , Image.Image )

        # Test not batched input
        snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )

        snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _lowercase ( self : Tuple ):
        # Initialize image_processing
        snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , np.ndarray )

        # Test not batched input
        snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
        snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _lowercase ( self : Tuple ):
        # Initialize image_processing
        snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , torch.Tensor )

        # Test not batched input
        snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
        snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def _lowercase ( self : List[Any] ):
        # Golden-value test against the COCO detection fixture; the literals
        # below are the expected encodings for checkpoint
        # microsoft/conditional-detr-resnet-50 and must not be edited casually.
        # prepare image and target
        snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case__ : Union[str, Any] = json.loads(f.read() )
        snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}

        # encode them
        snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )

        # verify pixel values
        snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , __A )
        snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )

        # verify area
        snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
        # verify boxes
        snake_case__ : Tuple = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
        snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
        # verify image_id
        snake_case__ : str = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
        # verify is_crowd
        snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
        # verify class_labels
        snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
        # verify orig_size
        snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
        # verify size
        snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )

    @slow
    def _lowercase ( self : str ):
        # Golden-value test against the COCO panoptic fixture.
        # prepare image, target and masks_path
        snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case__ : int = json.loads(f.read() )
        snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}

        snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )

        # encode them
        snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
        snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )

        # verify pixel values
        snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , __A )
        snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )

        # verify area
        snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
        # verify boxes
        snake_case__ : Dict = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
        snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
        # verify image_id
        snake_case__ : str = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
        # verify is_crowd
        snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
        # verify class_labels
        snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
        # verify masks
        snake_case__ : str = 8_2_2_8_7_3
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
        # verify orig_size
        snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
        # verify size
        snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
701
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Helper that builds DeformableDetr image-processor kwargs and computes
    the resize shapes the processor is expected to produce.

    NOTE(review): identifiers are machine-mangled — locals are bound as
    `snake_case__` and parameters as `__A`, while bodies read `size`, `parent`,
    `w`, `h`, etc. that are never bound as written. The original upstream file
    is authoritative for the real names.
    """

    def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
        snake_case__ : Optional[Any] = parent
        snake_case__ : str = batch_size
        snake_case__ : Union[str, Any] = num_channels
        snake_case__ : Optional[Any] = min_resolution
        snake_case__ : List[str] = max_resolution
        snake_case__ : Tuple = do_resize
        snake_case__ : str = size
        snake_case__ : str = do_normalize
        snake_case__ : Optional[Any] = image_mean
        snake_case__ : List[str] = image_std
        snake_case__ : List[str] = do_rescale
        snake_case__ : Tuple = rescale_factor
        snake_case__ : Tuple = do_pad

    def _lowercase ( self : str ):
        # kwargs dict forwarded to the image-processor constructor in tests.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
        # Compute the (height, width) the processor's shortest-edge resize
        # should yield; for batched inputs, take the per-axis maximum.
        if not batched:
            snake_case__ : List[Any] = image_inputs[0]
            if isinstance(__A , Image.Image ):
                snake_case__, snake_case__ : str = image.size
            else:
                snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
            if w < h:
                snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
                snake_case__ : Any = self.size["shortest_edge"]
            elif w > h:
                snake_case__ : Optional[int] = self.size["shortest_edge"]
                snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
            else:
                snake_case__ : Tuple = self.size["shortest_edge"]
                snake_case__ : int = self.size["shortest_edge"]
        else:
            snake_case__ : Any = []
            for image in image_inputs:
                snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
            snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]

        return expected_height, expected_width


@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
    """DeformableDetr image-processor tests for PIL, numpy and torch inputs,
    plus slow COCO detection / panoptic fixture checks.

    NOTE(review): `UpperCamelCase_` is a mangled base-class reference — the
    import list suggests ImageProcessingSavingTestMixin; confirm upstream.
    """

    a_ = DeformableDetrImageProcessor if is_vision_available() else None

    def _lowercase ( self : str ):
        snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )

    @property
    def _lowercase ( self : List[Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _lowercase ( self : Tuple ):
        # The processor must expose all configured preprocessing attributes.
        snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__A , "image_mean" ) )
        self.assertTrue(hasattr(__A , "image_std" ) )
        self.assertTrue(hasattr(__A , "do_normalize" ) )
        self.assertTrue(hasattr(__A , "do_resize" ) )
        self.assertTrue(hasattr(__A , "do_rescale" ) )
        self.assertTrue(hasattr(__A , "do_pad" ) )
        self.assertTrue(hasattr(__A , "size" ) )

    def _lowercase ( self : Any ):
        # from_dict must honour both the defaults and legacy size/max_size kwargs.
        snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , __A )
        snake_case__ : Tuple = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
        self.assertEqual(image_processor.do_pad , __A )

    def _lowercase ( self : str ):
        # Intentionally skipped case (kept for interface parity with the mixin).
        pass

    def _lowercase ( self : List[str] ):
        # Initialize image_processing
        snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , Image.Image )

        # Test not batched input
        snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )

        snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _lowercase ( self : int ):
        # Initialize image_processing
        snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , np.ndarray )

        # Test not batched input
        snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values

        snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    def _lowercase ( self : Union[str, Any] ):
        # Initialize image_processing
        snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
        for image in image_inputs:
            self.assertIsInstance(__A , torch.Tensor )

        # Test not batched input
        snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values

        snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )

        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )

        # Test batched
        snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
        snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )

    @slow
    def _lowercase ( self : Optional[Any] ):
        # Golden-value test against the COCO detection fixture; the literals
        # below are expected encodings and must not be edited casually.
        # prepare image and target
        snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            snake_case__ : Tuple = json.loads(f.read() )
        snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}

        # encode them
        snake_case__ : str = DeformableDetrImageProcessor()
        snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )

        # verify pixel values
        snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , __A )
        snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )

        # verify area
        snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
        # verify boxes
        snake_case__ : Union[str, Any] = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
        snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
        # verify image_id
        snake_case__ : Any = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
        # verify is_crowd
        snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
        # verify class_labels
        snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
        # verify orig_size
        snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
        # verify size
        snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )

    @slow
    def _lowercase ( self : Optional[int] ):
        # Golden-value test against the COCO panoptic fixture.
        # prepare image, target and masks_path
        snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            snake_case__ : Any = json.loads(f.read() )
        snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
        snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )

        # encode them
        snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
        snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )

        # verify pixel values
        snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , __A )
        snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )

        # verify area
        snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
        # verify boxes
        snake_case__ : Any = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
        snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
        # verify image_id
        snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
        # verify is_crowd
        snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
        # verify class_labels
        snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
        # verify masks
        snake_case__ : Union[str, Any] = 8_2_2_8_7_3
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
        # verify orig_size
        snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
        # verify size
        snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int ): return 1 if input_a == input_a else 0 def SCREAMING_SNAKE_CASE ( ): assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
702
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __lowerCamelCase : Tuple = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __lowerCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) snake_case__ : Optional[int] = bs[:] snake_case__ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case_ ) cs.append(2**8 + n ) n += 1 snake_case__ : Dict = [chr(snake_case_ ) for n in cs] return dict(zip(snake_case_ , snake_case_ ) ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): snake_case__ : Dict = set() snake_case__ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case__ : List[Any] = char return pairs class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ): snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding="utf-8" ) as vocab_handle: snake_case__ : Any = json.load(__A ) snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()} snake_case__ : Union[str, Any] = errors # how to handle errors in decoding snake_case__ : Any = bytes_to_unicode() snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding="utf-8" ) as merges_handle: snake_case__ : str = merges_handle.read().split("\n" )[1:-1] snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges] snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) ) snake_case__ : Optional[int] = {} snake_case__ : Any = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _lowercase ( self : List[Any] ): return len(self.encoder ) def _lowercase ( self : Any ): return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self : Optional[Any] , __A : Optional[int] ): if token in self.cache: return self.cache[token] snake_case__ : Union[str, Any] = tuple(__A ) snake_case__ : List[Any] = get_pairs(__A ) if not pairs: return token while True: snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) ) if bigram not in self.bpe_ranks: break snake_case__, snake_case__ : Dict = bigram snake_case__ : str = [] snake_case__ : Union[str, Any] = 0 while i < len(__A ): try: snake_case__ : Dict = word.index(__A , __A ) except 
ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case__ : str = j if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case__ : str = tuple(__A ) snake_case__ : int = new_word if len(__A ) == 1: break else: snake_case__ : List[str] = get_pairs(__A ) snake_case__ : List[Any] = " ".join(__A ) snake_case__ : Optional[int] = word return word def _lowercase ( self : Optional[Any] , __A : Optional[Any] ): snake_case__ : List[str] = [] for token in re.findall(self.pat , __A ): snake_case__ : Dict = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) ) return bpe_tokens def _lowercase ( self : Union[str, Any] , __A : Optional[int] ): return self.encoder.get(__A , self.encoder.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , __A : Optional[Any] ): return self.decoder.get(__A ) def _lowercase ( self : Union[str, Any] , __A : Dict ): snake_case__ : Optional[Any] = "".join(__A ) snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ): if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : List[Any] = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case__ : str = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" ) snake_case__ : str = 0 with open(__A , "w" , encoding="utf-8" ) as 
writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) snake_case__ : int = token_index writer.write(" ".join(__A ) + "\n" ) index += 1 return vocab_file, merge_file def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : Tuple = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ): snake_case__ : Any = [self.sep_token_id] snake_case__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ): snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()): snake_case__ : Optional[int] = " " + text return (text, kwargs) def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ): 
snake_case__ : Optional[Any] = super()._pad( encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , ) # Load from model defaults if return_attention_mask is None: snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A ) if needs_to_be_padded: snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case__ : int = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": snake_case__ : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
25
0
from .imports import is_tqdm_available


if is_tqdm_available():
    from tqdm.auto import tqdm as _tqdm

from ..state import PartialState


def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Multi-process-aware wrapper around ``tqdm.auto.tqdm``.

    Args:
        main_process_only: when True (default), only the local main process
            renders the progress bar; every other rank gets a disabled bar.
        *args: forwarded verbatim to ``tqdm``.
        **kwargs: forwarded verbatim to ``tqdm``.

    Returns:
        A ``tqdm`` instance (possibly with ``disable=True``).

    Raises:
        ImportError: if ``tqdm`` is not installed.
    """
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Show the bar only on the local main process, i.e. disable it on
        # every rank whose local index is non-zero.  (The residue used
        # `== 0`, which disabled the bar on the main process instead.)
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
703
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# NOTE(review): the residue assigned this path to a throwaway name and then
# referenced an undefined `git_repo_path`; bind it under the name actually used.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    """pytest hook: register the shared diffusers CLI options (e.g. --make-reports).

    The hook must carry this exact name for pytest to discover it; the residue
    had renamed it, so it was never called.
    """
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the detailed report files at session end when requested.

    Only acts when the session was started with ``--make-reports=<id>``.
    """
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
25
0
from collections.abc import Callable

import numpy as np


def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve ``y' = f(x, y)`` with the modified-Euler (Heun) method.

    Each step takes a forward-Euler predictor and corrects it with the
    trapezoidal average of the slopes at both ends of the interval.

    Args:
        ode_func: right-hand side ``f(x, y)`` of the ODE.
        y0: initial value ``y(x0)``.
        x0: left end of the integration interval.
        step_size: positive step width ``h``.
        x_end: right end of the integration interval.

    Returns:
        Array of approximated ``y`` values at ``x0, x0 + h, ..., x_end``.

    >>> f = lambda x, y: y
    >>> float(round(euler_modified(f, 1.0, 0.0, 0.2, 1.0)[-1], 3))
    2.703
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Predictor: one plain forward-Euler step.
        y_predict = y[k] + step_size * ode_func(x, y[k])
        # Corrector: average the slope at (x, y[k]) and at the predicted point.
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, y_predict)
        )
        x += step_size
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
704
def prefix_function(input_string: str) -> list:
    """Compute the Knuth-Morris-Pratt prefix function of ``input_string``.

    ``result[i]`` is the length of the longest proper prefix of
    ``input_string[: i + 1]`` that is also a suffix of it.

    Args:
        input_string: the string to analyse (may be empty).

    Returns:
        List of prefix-function values, one per character.

    The residue gave both functions in this module the same mangled name
    (the second shadowed the first) while the second called an undefined
    ``prefix_function``; the intended names are restored from that call site.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # Reuse the previous value (dynamic programming) and fall back
        # along shorter borders until the next character matches.
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    """Return the length of the longest border found anywhere in ``input_str``.

    ``default=0`` makes the empty string return 0 instead of raising
    ``ValueError`` from ``max()`` on an empty sequence.
    """
    return max(prefix_function(input_str), default=0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
25
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical checkpoint -> remote config-file mapping.
GPTSAN_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tanreinama/GPTSAN-2.8B-spout_is_uniform": (
        "https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"
    ),
}


class GPTSanJapaneseConfig(PretrainedConfig):
    """Configuration for a GPTSAN-japanese model (switch-transformer style MoE).

    Instantiating with no arguments yields the hyper-parameters of the
    released 2.8B checkpoint.  The residue assigned every value to a single
    throwaway local (and named all three class attributes ``a_``, each
    overwriting the last), so no configuration was ever stored; values are
    persisted on ``self`` here.
    """

    model_type = "gptsan-japanese"
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth = sparse (switch/MoE) layers + dense (ext) layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
705
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __lowerCamelCase : Optional[int] = get_logger() __lowerCamelCase : Optional[dict] = None class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ): """simple docstring""" def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ): super().__init__(features=__A ) import jax from jaxlib.xla_client import Device if isinstance(__A , __A ): raise ValueError( f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." 
) snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: snake_case__ : Any = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) snake_case__ : str = str(jax.devices()[0] ) snake_case__ : str = jnp_array_kwargs @staticmethod def _lowercase ( ): import jax return {str(__A ): device for device in jax.devices()} def _lowercase ( self : Optional[Any] , __A : str ): import jax import jax.numpy as jnp if isinstance(__A , __A ) and column: if all( isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(__A , axis=0 ) return column def _lowercase ( self : int , __A : Tuple ): import jax import jax.numpy as jnp if isinstance(__A , (str, bytes, type(__A )) ): return value elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() snake_case__ : Optional[int] = {} if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: snake_case__ : Any = {"dtype": jnp.intaa} else: snake_case__ : Tuple = {"dtype": jnp.intaa} elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): snake_case__ : str = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__A , 
PIL.Image.Image ): snake_case__ : Optional[Any] = np.asarray(__A ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: snake_case__ : int = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} ) def _lowercase ( self : Union[str, Any] , __A : Optional[int] ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__A , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ): snake_case__ : Union[str, Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__A , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] ) elif isinstance(__A , (list, tuple) ): return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] ) return self._tensorize(__A ) def _lowercase ( self : Tuple , __A : dict ): return map_nested(self._recursive_tensorize , __A , map_list=__A ) def _lowercase ( self : Optional[int] , __A : pa.Table ): snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A ) snake_case__ : Tuple = self.python_features_decoder.decode_row(__A ) return self.recursive_tensorize(__A ) def _lowercase ( self : Optional[Any] , __A : pa.Table ): snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A ) snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] ) snake_case__ : List[Any] = 
self.recursive_tensorize(__A ) snake_case__ : Dict = self._consolidate(__A ) return column def _lowercase ( self : str , __A : pa.Table ): snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A ) snake_case__ : int = self.python_features_decoder.decode_batch(__A ) snake_case__ : List[Any] = self.recursive_tensorize(__A ) for column_name in batch: snake_case__ : Any = self._consolidate(batch[column_name] ) return batch
25
0
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __lowerCamelCase : Union[str, Any] = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip 
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __lowerCamelCase : List[Any] = concatenate_datasets __lowerCamelCase : List[str] = DownloadConfig __lowerCamelCase : Union[str, Any] = DownloadManager __lowerCamelCase : str = DownloadMode __lowerCamelCase : Union[str, Any] = DownloadConfig __lowerCamelCase : List[str] = DownloadMode __lowerCamelCase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
706
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCamelCase : Tuple = { """configuration_roberta_prelayernorm""": [ """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaPreLayerNormConfig""", """RobertaPreLayerNormOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaPreLayerNormForCausalLM""", """RobertaPreLayerNormForMaskedLM""", """RobertaPreLayerNormForMultipleChoice""", """RobertaPreLayerNormForQuestionAnswering""", """RobertaPreLayerNormForSequenceClassification""", """RobertaPreLayerNormForTokenClassification""", """RobertaPreLayerNormModel""", """RobertaPreLayerNormPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = [ """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaPreLayerNormForCausalLM""", """TFRobertaPreLayerNormForMaskedLM""", """TFRobertaPreLayerNormForMultipleChoice""", """TFRobertaPreLayerNormForQuestionAnswering""", """TFRobertaPreLayerNormForSequenceClassification""", """TFRobertaPreLayerNormForTokenClassification""", """TFRobertaPreLayerNormMainLayer""", """TFRobertaPreLayerNormModel""", """TFRobertaPreLayerNormPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """FlaxRobertaPreLayerNormForCausalLM""", """FlaxRobertaPreLayerNormForMaskedLM""", """FlaxRobertaPreLayerNormForMultipleChoice""", """FlaxRobertaPreLayerNormForQuestionAnswering""", """FlaxRobertaPreLayerNormForSequenceClassification""", """FlaxRobertaPreLayerNormForTokenClassification""", 
"""FlaxRobertaPreLayerNormModel""", """FlaxRobertaPreLayerNormPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys __lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
0
from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __lowerCamelCase : int = logging.get_logger(__name__) class SCREAMING_SNAKE_CASE__ : """simple docstring""" a_ = 4_2 a_ = None @staticmethod def _lowercase ( ): raise NotImplementedError def _lowercase ( self : str , __A : Tuple , __A : int , __A : str , **__A : Union[str, Any] ): raise NotImplementedError def _lowercase ( self : int , __A : int ): raise NotImplementedError def _lowercase ( self : Union[str, Any] ): if not self.is_available(): raise RuntimeError( f'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' ) @classmethod def _lowercase ( cls : str ): return f'''`pip install {cls.pip_package or cls.name}`''' class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "optuna" @staticmethod def _lowercase ( ): return is_optuna_available() def _lowercase ( self : Union[str, Any] , __A : List[str] , __A : int , __A : str , **__A : List[Any] ): return run_hp_search_optuna(__A , __A , __A , **__A ) def _lowercase ( self : Any , __A : List[Any] ): return default_hp_space_optuna(__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "ray" a_ = "'ray[tune]'" @staticmethod def _lowercase ( ): return is_ray_available() def _lowercase ( self : Optional[Any] , __A : Dict , __A : int , __A : str , **__A : List[Any] ): return run_hp_search_ray(__A , __A , __A , **__A ) def _lowercase ( self : List[str] , __A : List[Any] ): return default_hp_space_ray(__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "sigopt" @staticmethod def _lowercase ( ): return is_sigopt_available() def _lowercase ( self : 
Optional[Any] , __A : str , __A : int , __A : str , **__A : List[str] ): return run_hp_search_sigopt(__A , __A , __A , **__A ) def _lowercase ( self : Optional[Any] , __A : List[str] ): return default_hp_space_sigopt(__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "wandb" @staticmethod def _lowercase ( ): return is_wandb_available() def _lowercase ( self : str , __A : Optional[int] , __A : int , __A : str , **__A : Union[str, Any] ): return run_hp_search_wandb(__A , __A , __A , **__A ) def _lowercase ( self : Optional[Any] , __A : int ): return default_hp_space_wandb(__A ) __lowerCamelCase : Union[str, Any] = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def SCREAMING_SNAKE_CASE ( ): snake_case__ : Tuple = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(snake_case_ ) > 0: snake_case__ : Any = available_backends[0].name if len(snake_case_ ) > 1: logger.info( F'''{len(snake_case_ )} hyperparameter search backends available. Using {name} as the default.''' ) return name raise RuntimeError( "No hyperparameter search backend available.\n" + "\n".join( F''' - To install {backend.name} run {backend.pip_install()}''' for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
707
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Tuple ): snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) snake_case__ : Tuple = get_activation("gelu" ) self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) ) self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) ) def _lowercase ( self : Dict ): snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) snake_case__ : Union[str, Any] = get_activation("gelu" ) snake_case__ : int = get_activation("gelu_10" ) snake_case__ : Optional[int] = torch_builtin(__A ) snake_case__ : Dict = geluaa(__A ) snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(__A ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _lowercase ( self : str ): get_activation("gelu" ) get_activation("gelu_10" ) get_activation("gelu_fast" ) get_activation("gelu_new" ) get_activation("gelu_python" ) get_activation("gelu_pytorch_tanh" ) get_activation("linear" ) get_activation("mish" ) get_activation("quick_gelu" ) get_activation("relu" ) get_activation("sigmoid" ) get_activation("silu" ) get_activation("swish" ) get_activation("tanh" ) with self.assertRaises(__A ): get_activation("bogus" ) with self.assertRaises(__A ): get_activation(__A ) def _lowercase ( self : List[str] ): snake_case__ : List[str] = get_activation("gelu" ) snake_case__ : Any = 1 snake_case__ : Union[str, Any] = get_activation("gelu" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__A ): snake_case__ : int = acta.a
25
0
import pytest from datasets import inspect_metric, list_metrics, load_metric @pytest.fixture def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ): monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() ) @pytest.fixture def SCREAMING_SNAKE_CASE ( snake_case_ : Dict ): class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Union[str, Any] , __A : Any ): snake_case__ : Dict = metric_id class SCREAMING_SNAKE_CASE__ : """simple docstring""" a_ = [MetricMock(UpperCamelCase_ ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]] def _lowercase ( self : Dict ): return self._metrics monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() ) @pytest.mark.parametrize( "func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] ) def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Any ): if "tmp_path" in args: snake_case__ : Any = tuple(arg if arg != "tmp_path" else tmp_path for arg in args ) with pytest.warns(snake_case_ , match="https://huggingface.co/docs/evaluate" ): func(*snake_case_ )
708
# Conversion script: fairseq UniSpeechSat checkpoint -> Hugging Face format.
# NOTE(review): obfuscation broke most local bindings (all assignments target
# `snake_case__` while later code reads the original names such as `key`,
# `hf_pointer`, `value`, `fairseq_dict`, `feature_extractor`, `is_used`,
# `unused_weights`, `items`, `layer_id`, `type_id`, `config_path`,
# `is_finetuned`, `checkpoint_path`, `dict_path`, `hf_wavavec`, `parser`,
# `args`, `convert_unispeech_sat_checkpoint`), and several defs repeat
# parameter name `snake_case_`, which is a SyntaxError. Tokens preserved as-is.
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
# Module logger (obfuscated name; read later as `logger`).
__lowerCamelCase : int = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF module path ("*" is the layer index).
__lowerCamelCase : int = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """label_embs_concat""": """label_embeddings_concat""",
    """mask_emb""": """masked_spec_embed""",
    """spk_proj""": """speaker_proj""",
}

# HF attribute paths that live at the model root (no "unispeech_sat." prefix).
__lowerCamelCase : Tuple = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
    """label_embeddings_concat""",
    """speaker_proj""",
    """layer_norm_for_extract""",
]


def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
    # Walk a dotted attribute path on the HF model, validate shapes, and copy
    # `value` into the matching parameter slot (weight / weight_g / weight_v /
    # bias / whole tensor).
    for attribute in key.split("." ):
        snake_case__ : int = getattr(snake_case_ , snake_case_ )
    if weight_type is not None:
        snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
    else:
        snake_case__ : List[str] = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        snake_case__ : str = value
    elif weight_type == "weight_g":
        snake_case__ : Union[str, Any] = value
    elif weight_type == "weight_v":
        snake_case__ : Optional[Any] = value
    elif weight_type == "bias":
        snake_case__ : str = value
    else:
        snake_case__ : Union[str, Any] = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )


def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
    # Iterate all fairseq state-dict entries, dispatch conv layers to
    # load_conv_layer and everything else through MAPPING; collect and warn
    # about weights that matched nothing.
    snake_case__ : str = []
    snake_case__ : Optional[int] = fairseq_model.state_dict()
    snake_case__ : int = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        snake_case__ : Dict = False
        if "conv_layers" in name:
            load_conv_layer(
                snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
            snake_case__ : str = True
        else:
            for key, mapped_key in MAPPING.items():
                snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    snake_case__ : int = True
                    if "*" in mapped_key:
                        # Extract the layer index from the fairseq name.
                        snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
                        snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
                    if "weight_g" in name:
                        snake_case__ : List[Any] = "weight_g"
                    elif "weight_v" in name:
                        snake_case__ : Optional[Any] = "weight_v"
                    elif "bias" in name:
                        snake_case__ : Optional[Any] = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        snake_case__ : Optional[Any] = "weight"
                    else:
                        snake_case__ : Optional[Any] = None
                    set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
                continue
        if not is_used:
            unused_weights.append(snake_case_ )
    logger.warning(f'''Unused weights: {unused_weights}''' )


def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
    # Copy one feature-extractor conv/layer-norm tensor. The fairseq name is
    # "conv_layers.<layer_id>.<type_id>.<param>"; type_id 0 = conv,
    # type_id 2 = layer norm (or group norm on layer 0).
    snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
    snake_case__ : Union[str, Any] = name.split("." )
    snake_case__ : str = int(items[0] )
    snake_case__ : str = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            snake_case__ : Any = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            snake_case__ : Any = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            snake_case__ : Optional[Any] = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            snake_case__ : int = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(snake_case_ )


@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
    # Build the HF model (CTC head when fine-tuned, pretraining head otherwise),
    # load the fairseq checkpoint, copy weights and save the HF checkpoint.
    if config_path is not None:
        snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
    else:
        snake_case__ : Tuple = UniSpeechSatConfig()
    snake_case__ : str = ""
    if is_finetuned:
        snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
    else:
        snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
    snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    snake_case__ : Tuple = model[0].eval()
    recursively_load_weights(snake_case_ , snake_case_ )
    hf_wavavec.save_pretrained(snake_case_ )


if __name__ == "__main__":
    # CLI entry point for the conversion.
    __lowerCamelCase : int = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    __lowerCamelCase : List[Any] = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : Tuple = 0 for ch in input_str: snake_case__ : Optional[int] = ord(snake_case_ ) snake_case__ : List[Any] = pow(2 , snake_case_ ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
709
# Model tests for M2M100 (obfuscated as "MaMaaa"): a config/input builder, a
# ModelTester helper class, the common ModelTesterMixin suite, and slow
# integration tests against the facebook/m2m100_418M checkpoint.
# NOTE(review): obfuscation broke many bindings (`input_ids`, `config`,
# `attention_mask`, `__A`, `outputs`, `model`, `MaMaaaModelTester`, `dct`,
# `tokenizer`, `hypotheses_batch`, `generated`, `expected_en`, ... are read but
# assigned only via `snake_case__`), methods all share the name `_lowercase`,
# and tuple targets with annotations (`snake_case__, snake_case__ : Dict = ...`)
# are SyntaxErrors. Code tokens preserved as-is.
import copy
import tempfile
import unittest

from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
    from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder


def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
    # Fill in default attention/head masks and assemble the model-input dict.
    if attention_mask is None:
        snake_case__ : Any = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
    if decoder_head_mask is None:
        snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
    if cross_attn_head_mask is None:
        snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


# ModelTester helper: builds tiny configs/inputs and runs targeted checks.
class SCREAMING_SNAKE_CASE__ :
    """simple docstring"""

    def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
        snake_case__ : Optional[Any] = parent
        snake_case__ : List[str] = batch_size
        snake_case__ : Union[str, Any] = seq_length
        snake_case__ : Optional[Any] = is_training
        snake_case__ : List[str] = use_labels
        snake_case__ : Tuple = vocab_size
        snake_case__ : Optional[Any] = hidden_size
        snake_case__ : Union[str, Any] = num_hidden_layers
        snake_case__ : List[Any] = num_attention_heads
        snake_case__ : Tuple = intermediate_size
        snake_case__ : str = hidden_act
        snake_case__ : Optional[Any] = hidden_dropout_prob
        snake_case__ : int = attention_probs_dropout_prob
        snake_case__ : int = encoder_layerdrop
        snake_case__ : Tuple = decoder_layerdrop
        snake_case__ : List[str] = max_position_embeddings
        snake_case__ : Tuple = eos_token_id
        snake_case__ : Dict = pad_token_id
        snake_case__ : str = bos_token_id

    def _lowercase ( self : Tuple ):
        # Build random encoder/decoder ids and the matching inputs dict.
        snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case__ : Union[str, Any] = self.eos_token_id  # Eos Token
        snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
        snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
        snake_case__ : Union[str, Any] = self.get_config()
        snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
        return config, inputs_dict

    def _lowercase ( self : Dict ):
        # Tiny M2M100 config mirroring the tester's hyperparameters.
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    def _lowercase ( self : List[str] ):
        snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
        return config, inputs_dict

    def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
        # Decoder cache check: outputs with and without past_key_values must
        # match on the newly generated positions.
        snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
        snake_case__ : List[Any] = inputs_dict["input_ids"]
        snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
        snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
        # first forward pass
        snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
        snake_case__, snake_case__ : Dict = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
        snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
            "last_hidden_state"
        ]
        # select random slice
        snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
        snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )

    def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
        # Standalone encoder/decoder round-trip through save_pretrained must
        # reproduce the full model's hidden states.
        snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
        snake_case__ : Union[str, Any] = model(**__A )
        snake_case__ : Tuple = outputs.encoder_last_hidden_state
        snake_case__ : Union[str, Any] = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ : Dict = model.get_encoder()
            encoder.save_pretrained(__A )
            snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
        snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ : Dict = model.get_decoder()
            decoder.save_pretrained(__A )
            snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
        snake_case__ : List[str] = decoder(
            input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )


# Common test suite (ModelTesterMixin / GenerationTesterMixin / pipelines).
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """simple docstring"""

    a_ = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    a_ = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    a_ = True
    a_ = True
    a_ = False
    a_ = False

    def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
        # Skip pipeline cases this model cannot run.
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def _lowercase ( self : Tuple ):
        snake_case__ : Any = MaMaaaModelTester(self )
        snake_case__ : Dict = ConfigTester(self , config_class=__A )

    def _lowercase ( self : Optional[Any] ):
        self.config_tester.run_common_tests()

    def _lowercase ( self : Union[str, Any] ):
        # save/load round-trip must not report missing keys.
        snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            snake_case__ : int = model_class(__A )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__A )
                snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
            self.assertEqual(info["missing_keys"] , [] )

    def _lowercase ( self : Dict ):
        snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )

    def _lowercase ( self : Any ):
        snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*__A )

    def _lowercase ( self : Union[str, Any] ):
        # Forward pass via inputs_embeds instead of input_ids.
        snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            snake_case__ : str = model_class(__A )
            model.to(__A )
            model.eval()
            snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
            if not self.is_encoder_decoder:
                snake_case__ : Optional[Any] = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                snake_case__ : Union[str, Any] = inputs["input_ids"]
                snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids" , __A )
            snake_case__ : Tuple = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                snake_case__ : List[Any] = wte(__A )
            else:
                snake_case__ : Any = wte(__A )
                snake_case__ : Optional[int] = wte(__A )
            with torch.no_grad():
                model(**__A )[0]

    def _lowercase ( self : Optional[Any] ):
        # fp16 generation smoke test (half precision only on CUDA).
        snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        snake_case__ : Any = input_dict["input_ids"]
        snake_case__ : int = input_ids.ne(1 ).to(__A )
        snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
        if torch_device == "cuda":
            model.half()
        model.generate(__A , attention_mask=__A )
        model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )


def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
    # Helper: wrap nested ints into a long tensor on the test device.
    return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )


# Absolute tolerance for integration-test tensor comparisons.
__lowerCamelCase : Optional[Any] = 1e-4


# Slow integration tests against the real facebook/m2m100_418M checkpoint.
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """simple docstring"""

    @cached_property
    def _lowercase ( self : str ):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )

    def _lowercase ( self : Optional[int] ):
        # Base-model forward pass matches pinned hidden-state values.
        snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
        snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
        with torch.no_grad():
            snake_case__ : str = model(**__A )[0]
        snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
        self.assertEqual(output.shape , __A )
        # change to expected output here
        snake_case__ : Optional[Any] = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )

    def _lowercase ( self : Union[str, Any] ):
        # LM-head forward pass matches pinned logit values.
        snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
        # change to intended input
        snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
        with torch.no_grad():
            snake_case__ : Union[str, Any] = model(**__A )[0]
        snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
        self.assertEqual(output.shape , __A )
        # change to expected output here
        snake_case__ : List[str] = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )

    def _lowercase ( self : Optional[Any] ):
        # End-to-end fr->en beam-search translation against pinned references.
        snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
        snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
        snake_case__ : List[Any] = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
        snake_case__ : Tuple = model.generate(
            input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
        snake_case__ : List[str] = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        snake_case__ : Dict = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
        assert generated == expected_en
25
0
from typing import List, Union import numpy as np from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING __lowerCamelCase : Optional[int] = logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase_ ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" def __init__( self : str , *__A : str , **__A : Optional[Any] ): super().__init__(*__A , **__A ) requires_backends(self , "vision" ) self.check_model_type(__A ) def __call__( self : List[Any] , __A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__A : Optional[int] ): return super().__call__(__A , **__A ) def _lowercase ( self : List[str] , **__A : Optional[Any] ): return {}, {}, {} def _lowercase ( self : Dict , __A : Tuple ): snake_case__ : Dict = load_image(__A ) snake_case__ : Tuple = image.size snake_case__ : Any = self.image_processor(images=__A , return_tensors=self.framework ) return model_inputs def _lowercase ( self : int , __A : Dict ): snake_case__ : Optional[Any] = self.model(**__A ) return model_outputs def _lowercase ( self : Union[str, Any] , __A : Tuple ): snake_case__ : str = model_outputs.predicted_depth snake_case__ : Optional[Any] = torch.nn.functional.interpolate( predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode="bicubic" , align_corners=__A ) snake_case__ : List[Any] = prediction.squeeze().cpu().numpy() snake_case__ : str = (output * 2_5_5 / np.max(__A )).astype("uint8" ) snake_case__ : Any = Image.fromarray(__A ) snake_case__ : Dict = {} snake_case__ : Tuple = predicted_depth snake_case__ : Dict = depth return output_dict
710
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ): snake_case__ : Optional[int] = [] for part_id in partition_order: snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(snake_case_ ): expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 ) snake_case__ : Any = Spark(snake_case_ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 ) snake_case__ : Optional[Any] = [1, 0] snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions. 
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Optional[int] = spark.range(10 ).repartition(1 ) snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(snake_case_ ): assert row_id == F'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : str = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: snake_case__ : Union[str, Any] = lambda snake_case_ : x.reverse() snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] ) snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(snake_case_ ): snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Tuple = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] ) for i, (row_id, row_dict) in enumerate(snake_case_ ): snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] ) for i, (row_id, row_dict) in enumerate(snake_case_ ): snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Tuple = spark.range(100 ).repartition(1 ) 
snake_case__ : Union[str, Any] = Spark(snake_case_ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
25
0
class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : int , __A : List[Any] ): # we need a list not a string, so do something to change the type snake_case__ : Optional[Any] = arr.split("," ) def _lowercase ( self : Tuple ): snake_case__ : str = [int(self.array[0] )] * len(self.array ) snake_case__ : Union[str, Any] = [int(self.array[0] )] * len(self.array ) for i in range(1 , len(self.array ) ): snake_case__ : Union[str, Any] = max( int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) ) snake_case__ : Dict = max(sum_value[i] , rear[i - 1] ) return rear[len(self.array ) - 1] if __name__ == "__main__": __lowerCamelCase : Any = input("""please input some numbers:""") __lowerCamelCase : int = SubArray(whole_array) __lowerCamelCase : int = array.solve_sub_array() print(("""the results is:""", re))
711
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: 
from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
0
from __future__ import annotations import inspect import unittest from transformers import ViTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTForImageClassification, TFViTModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Union[str, Any] , __A : int , __A : Optional[int]=1_3 , __A : Dict=3_0 , __A : str=2 , __A : List[str]=3 , __A : Union[str, Any]=True , __A : List[Any]=True , __A : List[Any]=3_2 , __A : str=2 , __A : Any=4 , __A : Dict=3_7 , __A : Optional[int]="gelu" , __A : List[str]=0.1 , __A : List[str]=0.1 , __A : str=1_0 , __A : Any=0.0_2 , __A : str=3 , __A : Any=None , ): snake_case__ : Optional[int] = parent snake_case__ : str = batch_size snake_case__ : Optional[Any] = image_size snake_case__ : Tuple = patch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : List[Any] = is_training snake_case__ : Optional[int] = use_labels snake_case__ : Union[str, Any] = hidden_size snake_case__ : Any = num_hidden_layers snake_case__ : str = num_attention_heads snake_case__ : int = intermediate_size snake_case__ : Any = hidden_act snake_case__ : str = hidden_dropout_prob snake_case__ : str = attention_probs_dropout_prob snake_case__ : Optional[Any] = type_sequence_label_size snake_case__ : Optional[Any] = initializer_range snake_case__ : str = scope # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) snake_case__ : Any = (image_size // patch_size) ** 2 snake_case__ : Optional[Any] = num_patches + 1 def _lowercase ( self 
: Optional[Any] ): snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case__ : Tuple = None if self.use_labels: snake_case__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : Tuple = self.get_config() return config, pixel_values, labels def _lowercase ( self : Dict ): return ViTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , ) def _lowercase ( self : Optional[Any] , __A : Union[str, Any] , __A : Any , __A : Optional[int] ): snake_case__ : List[Any] = TFViTModel(config=__A ) snake_case__ : Union[str, Any] = model(__A , training=__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) # Test with an image with different size than the one specified in config. 
snake_case__ : Optional[Any] = self.image_size // 2 snake_case__ : Dict = pixel_values[:, :, :image_size, :image_size] snake_case__ : Any = model(__A , interpolate_pos_encoding=__A , training=__A ) snake_case__ : Any = (image_size // self.patch_size) ** 2 + 1 self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) ) def _lowercase ( self : Optional[int] , __A : int , __A : List[Any] , __A : List[Any] ): snake_case__ : Any = self.type_sequence_label_size snake_case__ : Optional[Any] = TFViTForImageClassification(__A ) snake_case__ : List[str] = model(__A , labels=__A , training=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # Test with an image with different size than the one specified in config. snake_case__ : Tuple = self.image_size // 2 snake_case__ : str = pixel_values[:, :, :image_size, :image_size] snake_case__ : int = model(__A , interpolate_pos_encoding=__A , training=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images snake_case__ : Union[str, Any] = 1 snake_case__ : Union[str, Any] = TFViTForImageClassification(__A ) snake_case__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) snake_case__ : str = model(__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def _lowercase ( self : Union[str, Any] ): snake_case__ : Optional[int] = self.prepare_config_and_inputs() snake_case__ : Any = config_and_inputs snake_case__ : int = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else () a_ = ( {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification} if is_tf_available() else {} ) a_ 
= False a_ = False a_ = False def _lowercase ( self : Union[str, Any] ): snake_case__ : Any = TFViTModelTester(self ) snake_case__ : Dict = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 ) def _lowercase ( self : Any ): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds" ) def _lowercase ( self : Optional[int] ): pass @unittest.skip(reason="ViT does not use inputs_embeds" ) def _lowercase ( self : Optional[int] ): pass def _lowercase ( self : List[str] ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Any = model_class(__A ) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) ) snake_case__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Layer ) ) def _lowercase ( self : Dict ): snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case__ : Optional[Any] = model_class(__A ) snake_case__ : Union[str, Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case__ : Dict = [*signature.parameters.keys()] snake_case__ : Tuple = ["pixel_values"] self.assertListEqual(arg_names[:1] , __A ) def _lowercase ( self : Dict ): snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def _lowercase ( self : Any ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__A ) @slow def _lowercase ( self : str ): snake_case__ : List[str] = TFViTModel.from_pretrained("google/vit-base-patch16-224" ) self.assertIsNotNone(__A ) def SCREAMING_SNAKE_CASE ( ): snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image 
@require_tf @require_vision class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : Union[str, Any] ): return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None @slow def _lowercase ( self : int ): snake_case__ : Tuple = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" ) snake_case__ : List[str] = self.default_image_processor snake_case__ : Optional[int] = prepare_img() snake_case__ : int = image_processor(images=__A , return_tensors="tf" ) # forward pass snake_case__ : Dict = model(**__A ) # verify the logits snake_case__ : str = tf.TensorShape((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , __A ) snake_case__ : int = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] ) tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1e-4 )
712
from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline else: from .pipeline_kandinsky import KandinskyPipeline from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput from .text_encoder import MultilingualCLIP
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : bool = False ): if not isinstance(snake_case_ , snake_case_ ): snake_case__ : List[str] = F'''Expected string as input, found {type(snake_case_ )}''' raise ValueError(snake_case_ ) if not isinstance(snake_case_ , snake_case_ ): snake_case__ : Tuple = F'''Expected boolean as use_pascal parameter, found {type(snake_case_ )}''' raise ValueError(snake_case_ ) snake_case__ : Tuple = input_str.split("_" ) snake_case__ : str = 0 if use_pascal else 1 snake_case__ : Optional[int] = words[start_index:] snake_case__ : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize] snake_case__ : List[str] = "" if use_pascal else words[0] return "".join([initial_word, *capitalized_words] ) if __name__ == "__main__": from doctest import testmod testmod()
713
import numpy as np from matplotlib import pyplot as plt from sklearn.datasets import load_iris from sklearn.metrics import ConfusionMatrixDisplay from sklearn.model_selection import train_test_split from xgboost import XGBClassifier def SCREAMING_SNAKE_CASE ( snake_case_ : dict ): return (data["data"], data["target"]) def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ): snake_case__ : Optional[int] = XGBClassifier() classifier.fit(snake_case_ , snake_case_ ) return classifier def SCREAMING_SNAKE_CASE ( ): snake_case__ : Any = load_iris() snake_case__, snake_case__ : str = data_handling(snake_case_ ) snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split( snake_case_ , snake_case_ , test_size=0.25 ) snake_case__ : Dict = iris["target_names"] # Create an XGBoost Classifier from the training data snake_case__ : Dict = xgboost(snake_case_ , snake_case_ ) # Display the confusion matrix of the classifier with both training and test sets ConfusionMatrixDisplay.from_estimator( snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , ) plt.title("Normalized Confusion Matrix - IRIS Dataset" ) plt.show() if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
25
0
from .constants import ( MODEL_NAME, OPTIMIZER_NAME, RNG_STATE_NAME, SAFE_WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_NAME, SCALER_NAME, SCHEDULER_NAME, TORCH_LAUNCH_PARAMS, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ) from .dataclasses import ( BnbQuantizationConfig, ComputeEnvironment, CustomDtype, DeepSpeedPlugin, DistributedDataParallelKwargs, DistributedType, DynamoBackend, FPaRecipeKwargs, FullyShardedDataParallelPlugin, GradientAccumulationPlugin, GradScalerKwargs, InitProcessGroupKwargs, KwargsHandler, LoggerType, MegatronLMPlugin, PrecisionType, ProjectConfiguration, RNGType, SageMakerDistributedType, TensorInformation, TorchDynamoPlugin, ) from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env from .imports import ( get_ccl_version, is_abit_bnb_available, is_abit_bnb_available, is_aim_available, is_bfaa_available, is_bnb_available, is_botoa_available, is_ccl_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_fpa_available, is_ipex_available, is_megatron_lm_available, is_mlflow_available, is_mps_available, is_npu_available, is_rich_available, is_safetensors_available, is_sagemaker_available, is_tensorboard_available, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) from .modeling import ( check_device_map, check_tied_parameters_in_config, check_tied_parameters_on_same_device, compute_module_sizes, convert_file_size_to_int, dtype_byte_size, find_tied_parameters, get_balanced_memory, get_max_layer_size, get_max_memory, get_mixed_precision_context_manager, id_tensor_storage, infer_auto_device_map, load_checkpoint_in_model, load_offloaded_weights, load_state_dict, named_module_tensors, retie_parameters, set_module_tensor_to_device, shard_checkpoint, ) from .offload import ( OffloadedWeightsLoader, PrefixedDataset, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, save_offload_index, ) from .operations import ( broadcast, 
broadcast_object_list, concatenate, convert_outputs_to_fpaa, convert_to_fpaa, find_batch_size, find_device, gather, gather_object, get_data_structure, honor_type, initialize_tensors, is_namedtuple, is_tensor_information, is_torch_tensor, listify, pad_across_processes, recursively_apply, reduce, send_to_device, slice_tensors, ) from .versions import compare_versions, is_torch_version if is_deepspeed_available(): from .deepspeed import ( DeepSpeedEngineWrapper, DeepSpeedOptimizerWrapper, DeepSpeedSchedulerWrapper, DummyOptim, DummyScheduler, HfDeepSpeedConfig, ) from .bnb import has_abit_bnb_layers, load_and_quantize_model from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer from .launch import ( PrepareForLaunch, _filter_args, prepare_deepspeed_cmd_env, prepare_multi_gpu_env, prepare_sagemager_args_inputs, prepare_simple_launcher_cmd_env, prepare_tpu, ) from .megatron_lm import ( AbstractTrainStep, BertTrainStep, GPTTrainStep, MegatronEngine, MegatronLMDummyDataLoader, MegatronLMDummyScheduler, MegatronLMOptimizerWrapper, MegatronLMSchedulerWrapper, TaTrainStep, avg_losses_across_data_parallel_group, gather_across_data_parallel_groups, ) from .megatron_lm import initialize as megatron_lm_initialize from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader from .megatron_lm import prepare_model as megatron_lm_prepare_model from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler from .memory import find_executable_batch_size, release_memory from .other import ( extract_model_from_parallel, get_pretty_name, is_port_in_use, merge_dicts, patch_environment, save, wait_for_everyone, write_basic_config, ) from .random import set_seed, synchronize_rng_state, synchronize_rng_states from .torch_xla import install_xla from .tqdm import tqdm from .transformer_engine import convert_model, has_transformer_engine_layers
714
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ): snake_case__ : Tuple = args.log_outputs snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric snake_case__ : List[str] = load_metric("wer" ) snake_case__ : List[str] = load_metric("cer" ) # compute metrics snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] ) snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}''' print(snake_case_ ) with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f: f.write(snake_case_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt''' snake_case__ : int = F'''log_{dataset_id}_targets.txt''' with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t: # mapping function to write output def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ): p.write(F'''{i}''' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(F'''{i}''' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case_ , with_indices=snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
snake_case__ : Optional[Any] = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) ) return text def SCREAMING_SNAKE_CASE ( snake_case_ : int ): # load dataset snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id ) snake_case__ : List[Any] = feature_extractor.sampling_rate # resample audio snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) ) # load eval pipeline if args.device is None: snake_case__ : int = 0 if torch.cuda.is_available() else -1 snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case_ : Any ): snake_case__ : Union[str, Any] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) snake_case__ : Optional[int] = prediction["text"] snake_case__ : Optional[Any] = normalize_text(batch["sentence"] ) return batch # run inference on all examples snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case_ , snake_case_ ) if __name__ == "__main__": __lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) __lowerCamelCase : str = parser.parse_args() main(args)
25
0
class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Tuple ): snake_case__ : Union[str, Any] = {} def _lowercase ( self : Union[str, Any] ): print(self.vertex ) for i in self.vertex: print(__A , " -> " , " -> ".join([str(__A ) for j in self.vertex[i]] ) ) def _lowercase ( self : List[str] , __A : int , __A : int ): # check if vertex is already present, if from_vertex in self.vertex: self.vertex[from_vertex].append(__A ) else: # else make a new vertex snake_case__ : Optional[int] = [to_vertex] def _lowercase ( self : Optional[int] ): # visited array for storing already visited nodes snake_case__ : Optional[int] = [False] * len(self.vertex ) # call the recursive helper function for i in range(len(self.vertex ) ): if not visited[i]: self.dfs_recursive(__A , __A ) def _lowercase ( self : Any , __A : int , __A : list ): # mark start vertex as visited snake_case__ : Tuple = True print(__A , end=" " ) # Recur for all the vertices that are adjacent to this node for i in self.vertex: if not visited[i]: self.dfs_recursive(__A , __A ) if __name__ == "__main__": __lowerCamelCase : str = Graph() g.add_edge(0, 1) g.add_edge(0, 2) g.add_edge(1, 2) g.add_edge(2, 0) g.add_edge(2, 3) g.add_edge(3, 3) g.print_graph() print("""DFS:""") g.dfs() # OUTPUT: # 0 -> 1 -> 2 # 1 -> 2 # 2 -> 0 -> 3 # 3 -> 3 # DFS: # 0 1 2 3
715
import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import ClassLabel, Features, Value from .base import TaskTemplate @dataclass(frozen=UpperCamelCase_ ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} ) a_ = Features({"text": Value("string" )} ) a_ = Features({"labels": ClassLabel} ) a_ = "text" a_ = "labels" def _lowercase ( self : Tuple , __A : List[Any] ): if self.label_column not in features: raise ValueError(f'''Column {self.label_column} is not present in features.''' ) if not isinstance(features[self.label_column] , __A ): raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' ) snake_case__ : Any = copy.deepcopy(self ) snake_case__ : Optional[Any] = self.label_schema.copy() snake_case__ : List[str] = features[self.label_column] snake_case__ : Dict = label_schema return task_template @property def _lowercase ( self : Tuple ): return { self.text_column: "text", self.label_column: "labels", }
25
0
import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the SDXL img2img pipeline using tiny random models."""

    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        """Build a minimal set of SDXL components small enough for CPU tests."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            # SDXL conditions on text + time ids
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic pipeline kwargs for `device`."""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5  # map [-1, 1] noise into the [0, 1] image range
        if str(device).startswith("mps"):
            # mps does not support device-bound generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        # NOTE(review): deliberately skipped in the original; name chosen to
        # override the mixin's check — confirm against upstream.
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests against real pretrained checkpoints."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        """Return deterministic pipeline kwargs with pre-sampled latents."""
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_base(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
716
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder."""

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping it from a composite InstructBlipConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former (query transformer)."""

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # insert cross-attention to the vision features every N layers
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load this sub-config, unwrapping it from a composite InstructBlipConfig if needed."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    """Composite configuration: vision encoder + Q-Former + language model."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        # the Q-Former cross-attends over the vision encoder's hidden states
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Build a composite config from the three sub-model configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
25
0
import random
import unittest

import numpy as np

from diffusers import (
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
    PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests of the ONNX img2img pipeline against a tiny hub checkpoint."""

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Return deterministic pipeline kwargs."""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    """Nightly GPU integration tests against real ONNX checkpoints."""

    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the mangled source sets some session flag to False here;
        # restored as enable_mem_pattern per the upstream test — confirm.
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
717
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place with gnome sort and return it.

    Walks the list, swapping each out-of-order adjacent pair and stepping
    back after a swap; O(n^2) worst case, O(n) on already-sorted input.
    """
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order pair, then step back to re-check
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
25
0
from __future__ import annotations

import math
import random
from collections.abc import Collection
from typing import overload


class Vector:
    """A real-valued vector with basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        """Store a copy of `components` (empty vector when None)."""
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        """Render as "(c0,c1,...)"."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise sum; raises if sizes differ."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise difference; raises if sizes differ."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication (scalar operand) or dot product (Vector operand)."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return a new Vector with the same components."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return component `i` (negative indices allowed, Python-style)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set component `pos` to `value`."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the 2-norm; raises on an empty vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Return the angle to `other`, in radians (or degrees when `deg`)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the standard basis vector with a 1 at index `pos`."""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return scalar*x + y (the BLAS axpy operation)."""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Return an n-dimensional vector of random ints in [a, b], seeded by n."""
    random.seed(n)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    """A w-by-h real matrix with basic operations (add, sub, mul, determinant)."""

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        """Store the row-major `matrix` with width `w` and height `h`."""
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Render rows as "|a,b,...|" lines."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Component-wise sum; raises if dimensions differ."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Component-wise difference; raises if dimensions differ."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product (Vector operand) or scalar multiplication."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    summands = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(summands))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return entry (x, y)."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set entry (x, y) to `value`."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Return the minor: determinant of the matrix without row x / column y."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Return the signed minor (-1)^(x+y) * minor(x, y)."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Return the determinant via cofactor expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    """Return the n-by-n zero matrix."""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a width-by-height matrix of random ints in [a, b], seeded by width."""
    random.seed(width)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
718
from __future__ import annotations

import time

# A path is a list of (y, x) coordinates, in visiting order.
Path = list[tuple[int, int]]

# 0 are free path whereas 1's are obstacles
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    """One grid cell reached during a search.

    Coordinates are kept both as separate ``pos_x``/``pos_y`` components and
    as the (y, x) tuple ``pos`` used for goal comparison; ``parent`` links
    back toward the search start so the path can be retraced.
    """

    def __init__(
        self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Plain FIFO breadth-first search over the module-level ``grid``."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        # start/goal arrive as (y, x); Node takes x first, then y.
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Expand nodes in FIFO order until the target cell is popped.

        Returns the retraced path on success, ``[start]`` if the queue is
        exhausted without reaching the goal, and ``None`` otherwise.
        """
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, unblocked neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Follow ``parent`` links from ``node`` back to the start."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Run two BFS frontiers toward each other, one step per iteration."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        """Alternate single pops from both frontiers until they meet."""
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            # Each frontier chases the other's most recently popped node.
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the two half-paths, dropping the duplicated meeting cell."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # the meeting cell already ends fwd_path
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
25
0
from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, apply_forward_hook from .attention_processor import AttentionProcessor, AttnProcessor from .modeling_utils import ModelMixin from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder @dataclass class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = 4_2 class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ ): """simple docstring""" a_ = True @register_to_config def __init__( self : int , __A : int = 3 , __A : int = 3 , __A : Tuple[str] = ("DownEncoderBlock2D",) , __A : Tuple[str] = ("UpDecoderBlock2D",) , __A : Tuple[int] = (6_4,) , __A : int = 1 , __A : str = "silu" , __A : int = 4 , __A : int = 3_2 , __A : int = 3_2 , __A : float = 0.1_8_2_1_5 , ): super().__init__() # pass init params to Encoder snake_case__ : List[str] = Encoder( in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , ) # pass init params to Decoder snake_case__ : Union[str, Any] = Decoder( in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , norm_num_groups=__A , act_fn=__A , ) snake_case__ : List[Any] = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 ) snake_case__ : Any = nn.Convad(__A , __A , 1 ) snake_case__ : str = False snake_case__ : Dict = False # only relevant if vae tiling is enabled snake_case__ : List[str] = self.config.sample_size snake_case__ : List[Any] = ( self.config.sample_size[0] if isinstance(self.config.sample_size , (list, tuple) ) else self.config.sample_size ) snake_case__ : Tuple = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) ) snake_case__ : List[str] = 0.2_5 def _lowercase ( self : int , __A : int , __A : int=False ): if 
isinstance(__A , (Encoder, Decoder) ): snake_case__ : List[Any] = value def _lowercase ( self : Union[str, Any] , __A : bool = True ): snake_case__ : int = use_tiling def _lowercase ( self : Tuple ): self.enable_tiling(__A ) def _lowercase ( self : Optional[Any] ): snake_case__ : str = True def _lowercase ( self : Tuple ): snake_case__ : Optional[int] = False @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def _lowercase ( self : Optional[Any] ): snake_case__ : Optional[Any] = {} def fn_recursive_add_processors(__A : str , __A : torch.nn.Module , __A : Dict[str, AttentionProcessor] ): if hasattr(__A , "set_processor" ): snake_case__ : List[Any] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f'''{name}.{sub_name}''' , __A , __A ) return processors for name, module in self.named_children(): fn_recursive_add_processors(__A , __A , __A ) return processors def _lowercase ( self : Union[str, Any] , __A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ): snake_case__ : str = len(self.attn_processors.keys() ) if isinstance(__A , __A ) and len(__A ) != count: raise ValueError( f'''A dict of processors was passed, but the number of processors {len(__A )} does not match the''' f''' number of attention layers: {count}. 
Please make sure to pass {count} processor classes.''' ) def fn_recursive_attn_processor(__A : str , __A : torch.nn.Module , __A : List[str] ): if hasattr(__A , "set_processor" ): if not isinstance(__A , __A ): module.set_processor(__A ) else: module.set_processor(processor.pop(f'''{name}.processor''' ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f'''{name}.{sub_name}''' , __A , __A ) for name, module in self.named_children(): fn_recursive_attn_processor(__A , __A , __A ) def _lowercase ( self : Any ): self.set_attn_processor(AttnProcessor() ) @apply_forward_hook def _lowercase ( self : Optional[Any] , __A : torch.FloatTensor , __A : bool = True ): if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size): return self.tiled_encode(__A , return_dict=__A ) if self.use_slicing and x.shape[0] > 1: snake_case__ : int = [self.encoder(__A ) for x_slice in x.split(1 )] snake_case__ : str = torch.cat(__A ) else: snake_case__ : Dict = self.encoder(__A ) snake_case__ : Optional[int] = self.quant_conv(__A ) snake_case__ : int = DiagonalGaussianDistribution(__A ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__A ) def _lowercase ( self : Optional[Any] , __A : torch.FloatTensor , __A : bool = True ): if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size): return self.tiled_decode(__A , return_dict=__A ) snake_case__ : Optional[int] = self.post_quant_conv(__A ) snake_case__ : List[str] = self.decoder(__A ) if not return_dict: return (dec,) return DecoderOutput(sample=__A ) @apply_forward_hook def _lowercase ( self : str , __A : torch.FloatTensor , __A : bool = True ): if self.use_slicing and z.shape[0] > 1: snake_case__ : int = [self._decode(__A ).sample for z_slice in z.split(1 )] snake_case__ : Union[str, Any] = torch.cat(__A ) else: snake_case__ : Optional[Any] = self._decode(__A ).sample if not return_dict: 
return (decoded,) return DecoderOutput(sample=__A ) def _lowercase ( self : List[Any] , __A : Union[str, Any] , __A : Dict , __A : Union[str, Any] ): snake_case__ : List[str] = min(a.shape[2] , b.shape[2] , __A ) for y in range(__A ): snake_case__ : Optional[Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent) return b def _lowercase ( self : str , __A : int , __A : Union[str, Any] , __A : str ): snake_case__ : Optional[Any] = min(a.shape[3] , b.shape[3] , __A ) for x in range(__A ): snake_case__ : Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent) return b def _lowercase ( self : List[str] , __A : torch.FloatTensor , __A : bool = True ): snake_case__ : List[Any] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) ) snake_case__ : Union[str, Any] = int(self.tile_latent_min_size * self.tile_overlap_factor ) snake_case__ : Union[str, Any] = self.tile_latent_min_size - blend_extent # Split the image into 512x512 tiles and encode them separately. 
snake_case__ : Tuple = [] for i in range(0 , x.shape[2] , __A ): snake_case__ : int = [] for j in range(0 , x.shape[3] , __A ): snake_case__ : str = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size] snake_case__ : Optional[Any] = self.encoder(__A ) snake_case__ : Union[str, Any] = self.quant_conv(__A ) row.append(__A ) rows.append(__A ) snake_case__ : Any = [] for i, row in enumerate(__A ): snake_case__ : Optional[int] = [] for j, tile in enumerate(__A ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: snake_case__ : str = self.blend_v(rows[i - 1][j] , __A , __A ) if j > 0: snake_case__ : List[Any] = self.blend_h(row[j - 1] , __A , __A ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__A , dim=3 ) ) snake_case__ : Any = torch.cat(__A , dim=2 ) snake_case__ : Optional[Any] = DiagonalGaussianDistribution(__A ) if not return_dict: return (posterior,) return AutoencoderKLOutput(latent_dist=__A ) def _lowercase ( self : Any , __A : torch.FloatTensor , __A : bool = True ): snake_case__ : Any = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) ) snake_case__ : Union[str, Any] = int(self.tile_sample_min_size * self.tile_overlap_factor ) snake_case__ : int = self.tile_sample_min_size - blend_extent # Split z into overlapping 64x64 tiles and decode them separately. # The tiles have an overlap to avoid seams between tiles. 
snake_case__ : Union[str, Any] = [] for i in range(0 , z.shape[2] , __A ): snake_case__ : Dict = [] for j in range(0 , z.shape[3] , __A ): snake_case__ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size] snake_case__ : Optional[Any] = self.post_quant_conv(__A ) snake_case__ : Optional[int] = self.decoder(__A ) row.append(__A ) rows.append(__A ) snake_case__ : Tuple = [] for i, row in enumerate(__A ): snake_case__ : Optional[int] = [] for j, tile in enumerate(__A ): # blend the above tile and the left tile # to the current tile and add the current tile to the result row if i > 0: snake_case__ : Any = self.blend_v(rows[i - 1][j] , __A , __A ) if j > 0: snake_case__ : Dict = self.blend_h(row[j - 1] , __A , __A ) result_row.append(tile[:, :, :row_limit, :row_limit] ) result_rows.append(torch.cat(__A , dim=3 ) ) snake_case__ : Any = torch.cat(__A , dim=2 ) if not return_dict: return (dec,) return DecoderOutput(sample=__A ) def _lowercase ( self : Union[str, Any] , __A : torch.FloatTensor , __A : bool = False , __A : bool = True , __A : Optional[torch.Generator] = None , ): snake_case__ : List[Any] = sample snake_case__ : int = self.encode(__A ).latent_dist if sample_posterior: snake_case__ : Union[str, Any] = posterior.sample(generator=__A ) else: snake_case__ : int = posterior.mode() snake_case__ : int = self.decode(__A ).sample if not return_dict: return (dec,) return DecoderOutput(sample=__A )
719
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Dict = parent snake_case__ : Optional[int] = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : str = min_resolution snake_case__ : Tuple = max_resolution snake_case__ : List[Any] = do_resize snake_case__ : Dict = size snake_case__ : List[str] = do_normalize snake_case__ : Optional[int] = image_mean snake_case__ : Optional[int] = image_std snake_case__ : Any = do_rescale snake_case__ : Optional[int] = rescale_factor snake_case__ : int = do_pad def _lowercase ( self : Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ): if not batched: snake_case__ : List[str] = image_inputs[0] if isinstance(__A , 
Image.Image ): snake_case__, snake_case__ : Tuple = image.size else: snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2] if w < h: snake_case__ : Dict = int(self.size["shortest_edge"] * h / w ) snake_case__ : Optional[int] = self.size["shortest_edge"] elif w > h: snake_case__ : List[Any] = self.size["shortest_edge"] snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Dict = self.size["shortest_edge"] snake_case__ : Dict = self.size["shortest_edge"] else: snake_case__ : str = [] for image in image_inputs: snake_case__, snake_case__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0] snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = ConditionalDetrImageProcessor if is_vision_available() else None def _lowercase ( self : int ): snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self ) @property def _lowercase ( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Any ): snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : List[str] ): snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Any = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , 
pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : Union[str, Any] ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : 
Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : List[Any] ): # prepare image and target snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Union[str, Any] = json.loads(f.read() ) snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) snake_case__ : int = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : str ): # prepare image, target and masks_path snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : int = json.loads(f.read() ) snake_case__ : Optional[int] = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" ) snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : str = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
from __future__ import annotations class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : int , __A : int ): snake_case__ : Union[str, Any] = order # a_{0} ... a_{k} snake_case__ : int = [1.0] + [0.0] * order # b_{0} ... b_{k} snake_case__ : Dict = [1.0] + [0.0] * order # x[n-1] ... x[n-k] snake_case__ : str = [0.0] * self.order # y[n-1] ... y[n-k] snake_case__ : List[str] = [0.0] * self.order def _lowercase ( self : Any , __A : list[float] , __A : list[float] ): if len(__A ) < self.order: snake_case__ : int = [1.0, *a_coeffs] if len(__A ) != self.order + 1: snake_case__ : Optional[int] = ( f'''Expected a_coeffs to have {self.order + 1} elements ''' f'''for {self.order}-order filter, got {len(__A )}''' ) raise ValueError(__A ) if len(__A ) != self.order + 1: snake_case__ : Tuple = ( f'''Expected b_coeffs to have {self.order + 1} elements ''' f'''for {self.order}-order filter, got {len(__A )}''' ) raise ValueError(__A ) snake_case__ : Tuple = a_coeffs snake_case__ : Optional[Any] = b_coeffs def _lowercase ( self : int , __A : float ): snake_case__ : Union[str, Any] = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) snake_case__ : int = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] snake_case__ : Any = self.input_history[:-1] snake_case__ : str = self.output_history[:-1] snake_case__ : Tuple = sample snake_case__ : int = result return result
720
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __lowerCamelCase : Optional[int] = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ __lowerCamelCase : str = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ __lowerCamelCase : str = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ): snake_case__ : List[Any] = compute_mauve( p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , ) return out
25
0
import argparse import fairseq import torch from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging logging.set_verbosity_info() __lowerCamelCase : int = logging.get_logger(__name__) __lowerCamelCase : int = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """encoder.layer_norm_for_extract""": """layer_norm_for_extract""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """lm_head""", """label_embs_concat""": """label_embeddings_concat""", """mask_emb""": """masked_spec_embed""", """spk_proj""": """speaker_proj""", } __lowerCamelCase : Tuple = [ """lm_head""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", """label_embeddings_concat""", """speaker_proj""", """layer_norm_for_extract""", ] def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ): for attribute in key.split("." 
): snake_case__ : int = getattr(snake_case_ , snake_case_ ) if weight_type is not None: snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape else: snake_case__ : List[str] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case__ : str = value elif weight_type == "weight_g": snake_case__ : Union[str, Any] = value elif weight_type == "weight_v": snake_case__ : Optional[Any] = value elif weight_type == "bias": snake_case__ : str = value else: snake_case__ : Union[str, Any] = value logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ): snake_case__ : str = [] snake_case__ : Optional[int] = fairseq_model.state_dict() snake_case__ : int = hf_model.unispeech_sat.feature_extractor for name, value in fairseq_dict.items(): snake_case__ : Dict = False if "conv_layers" in name: load_conv_layer( snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , ) snake_case__ : str = True else: for key, mapped_key in MAPPING.items(): snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]: if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key): # special case since naming is very similar continue snake_case__ : int = True if "*" in mapped_key: snake_case__ : Any = name.split(snake_case_ )[0].split("." 
)[-2] snake_case__ : Any = mapped_key.replace("*" , snake_case_ ) if "weight_g" in name: snake_case__ : List[Any] = "weight_g" elif "weight_v" in name: snake_case__ : Optional[Any] = "weight_v" elif "bias" in name: snake_case__ : Optional[Any] = "bias" elif "weight" in name: # TODO: don't match quantizer.weight_proj snake_case__ : Optional[Any] = "weight" else: snake_case__ : Optional[Any] = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ): snake_case__ : Tuple = full_name.split("conv_layers." )[-1] snake_case__ : Union[str, Any] = name.split("." ) snake_case__ : str = int(items[0] ) snake_case__ : str = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case__ : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case__ : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' 
{feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case__ : Optional[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case__ : int = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ): if config_path is not None: snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ ) else: snake_case__ : Tuple = UniSpeechSatConfig() snake_case__ : str = "" if is_finetuned: snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ ) else: snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ ) snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} ) snake_case__ : Tuple = model[0].eval() recursively_load_weights(snake_case_ , snake_case_ ) hf_wavavec.save_pretrained(snake_case_ ) if __name__ == "__main__": __lowerCamelCase : int = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( 
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not""" ) __lowerCamelCase : List[Any] = parser.parse_args() convert_unispeech_sat_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
721
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __lowerCamelCase : Union[str, Any] = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip 
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __lowerCamelCase : List[Any] = concatenate_datasets __lowerCamelCase : List[str] = DownloadConfig __lowerCamelCase : Union[str, Any] = DownloadManager __lowerCamelCase : str = DownloadMode __lowerCamelCase : Union[str, Any] = DownloadConfig __lowerCamelCase : List[str] = DownloadMode __lowerCamelCase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
25
0
import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __lowerCamelCase : List[str] = """base_with_context""" def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Any ): snake_case__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) ) snake_case__ : List[str] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=snake_case_ ) for lyr_num, lyr in enumerate(model.encoders ): snake_case__ : str = weights[F'''layers_{lyr_num}'''] snake_case__ : Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) snake_case__ : Union[str, Any] = ly_weight["attention"] snake_case__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) snake_case__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) snake_case__ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) snake_case__ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) snake_case__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) snake_case__ : str = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) snake_case__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) snake_case__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) snake_case__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : str ): 
snake_case__ : int = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) ) snake_case__ : Tuple = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=snake_case_ ) for lyr_num, lyr in enumerate(model.encoders ): snake_case__ : int = weights[F'''layers_{lyr_num}'''] snake_case__ : Any = ly_weight["attention"] snake_case__ : int = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) snake_case__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) snake_case__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) snake_case__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) snake_case__ : Any = nn.Parameter( torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) ) snake_case__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) snake_case__ : List[Any] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) snake_case__ : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) snake_case__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) snake_case__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) ) return model def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Optional[Any] ): snake_case__ : Any = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) ) snake_case__ : str = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) ) snake_case__ : Union[str, Any] = nn.Parameter( torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=snake_case_ ) snake_case__ : List[Any] = nn.Parameter( torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) ) for lyr_num, lyr in enumerate(model.decoders ): snake_case__ : Tuple = weights[F'''layers_{lyr_num}'''] snake_case__ : Tuple = 
nn.Parameter( torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) ) snake_case__ : str = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) ) snake_case__ : Optional[Any] = ly_weight["self_attention"] snake_case__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) snake_case__ : int = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) snake_case__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) snake_case__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) snake_case__ : List[Any] = ly_weight["MultiHeadDotProductAttention_0"] snake_case__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) ) snake_case__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) ) snake_case__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) ) snake_case__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) ) snake_case__ : List[str] = nn.Parameter( torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) ) snake_case__ : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) ) snake_case__ : Any = nn.Parameter( torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) ) snake_case__ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) ) snake_case__ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) ) snake_case__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) ) snake_case__ : Tuple = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) ) snake_case__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) ) return model def SCREAMING_SNAKE_CASE ( snake_case_ : 
Union[str, Any] ): snake_case__ : List[Any] = checkpoints.load_tax_checkpoint(args.checkpoint_path ) snake_case__ : Any = jnp.tree_util.tree_map(onp.array , snake_case_ ) snake_case__ : Union[str, Any] = [ "from __gin__ import dynamic_registration", "from music_spectrogram_diffusion.models.diffusion import diffusion_utils", "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0", "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()", ] snake_case__ : int = os.path.join(args.checkpoint_path , ".." , "config.gin" ) snake_case__ : Optional[int] = inference.parse_training_gin_file(snake_case_ , snake_case_ ) snake_case__ : str = inference.InferenceModel(args.checkpoint_path , snake_case_ ) snake_case__ : str = DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" ) snake_case__ : List[str] = SpectrogramNotesEncoder( max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) snake_case__ : Optional[Any] = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , ) snake_case__ : Any = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , 
targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) snake_case__ : Union[str, Any] = load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , snake_case_ ) snake_case__ : List[str] = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , snake_case_ ) snake_case__ : Union[str, Any] = load_decoder(ta_checkpoint["target"]["decoder"] , snake_case_ ) snake_case__ : Optional[int] = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" ) snake_case__ : str = SpectrogramDiffusionPipeline( notes_encoder=snake_case_ , continuous_encoder=snake_case_ , decoder=snake_case_ , scheduler=snake_case_ , melgan=snake_case_ , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __lowerCamelCase : List[str] = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=f"{MODEL}/checkpoint_500000", type=str, required=False, help="""Path to the original jax model checkpoint.""", ) __lowerCamelCase : List[str] = parser.parse_args() main(args)
700
from __future__ import annotations def SCREAMING_SNAKE_CASE ( snake_case_ : int ): snake_case__ : str = [True] * limit snake_case__ : str = False snake_case__ : str = False snake_case__ : str = True for i in range(3 , int(limit**0.5 + 1 ) , 2 ): snake_case__ : Optional[Any] = i * 2 while index < limit: snake_case__ : Union[str, Any] = False snake_case__ : Any = index + i snake_case__ : Optional[Any] = [2] for i in range(3 , snake_case_ , 2 ): if is_prime[i]: primes.append(snake_case_ ) return primes def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ): snake_case__ : Optional[int] = prime_sieve(snake_case_ ) snake_case__ : List[Any] = 0 snake_case__ : List[str] = 0 for i in range(len(snake_case_ ) ): for j in range(i + length , len(snake_case_ ) ): snake_case__ : Dict = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: snake_case__ : Tuple = j - i snake_case__ : str = sol return largest if __name__ == "__main__": print(f"{solution() = }")
25
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCamelCase : Optional[Any] = { """configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : int = [ """LILT_PRETRAINED_MODEL_ARCHIVE_LIST""", """LiltForQuestionAnswering""", """LiltForSequenceClassification""", """LiltForTokenClassification""", """LiltModel""", """LiltPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys __lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
701
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Optional[Any] = parent snake_case__ : str = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : Optional[Any] = min_resolution snake_case__ : List[str] = max_resolution snake_case__ : Tuple = do_resize snake_case__ : str = size snake_case__ : str = do_normalize snake_case__ : Optional[Any] = image_mean snake_case__ : List[str] = image_std snake_case__ : List[str] = do_rescale snake_case__ : Tuple = rescale_factor snake_case__ : Tuple = do_pad def _lowercase ( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ): if not batched: snake_case__ : List[Any] = image_inputs[0] if isinstance(__A , Image.Image ): 
snake_case__, snake_case__ : str = image.size else: snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2] if w < h: snake_case__ : Any = int(self.size["shortest_edge"] * h / w ) snake_case__ : Any = self.size["shortest_edge"] elif w > h: snake_case__ : Optional[int] = self.size["shortest_edge"] snake_case__ : Any = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Tuple = self.size["shortest_edge"] snake_case__ : int = self.size["shortest_edge"] else: snake_case__ : Any = [] for image in image_inputs: snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0] snake_case__ : int = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = DeformableDetrImageProcessor if is_vision_available() else None def _lowercase ( self : str ): snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self ) @property def _lowercase ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Tuple ): snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "do_rescale" ) ) self.assertTrue(hasattr(__A , "do_pad" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : Any ): snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Tuple = 
self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : str ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : int ): # Initialize image_processing snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, 
expected_height, expected_width) , ) # Test batched snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Union[str, Any] ): # Initialize image_processing snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : Optional[Any] ): # prepare image and target snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Tuple = json.loads(f.read() ) snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : str = DeformableDetrImageProcessor() snake_case__ : Tuple = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : Any = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : Optional[int] ): # prepare image, target and masks_path snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : Any = json.loads(f.read() ) snake_case__ : Dict = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" ) snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Any = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : List[str] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : Union[str, Any] = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : Union[str, Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar __lowerCamelCase : str = TypeVar("""KEY""") __lowerCamelCase : int = TypeVar("""VAL""") @dataclass(frozen=UpperCamelCase_ , slots=UpperCamelCase_ ) class SCREAMING_SNAKE_CASE__ ( Generic[KEY, VAL] ): """simple docstring""" a_ = 4_2 a_ = 4_2 class SCREAMING_SNAKE_CASE__ ( _Item ): """simple docstring""" def __init__( self : Tuple ): super().__init__(__A , __A ) def __bool__( self : Dict ): return False __lowerCamelCase : List[str] = _DeletedItem() class SCREAMING_SNAKE_CASE__ ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self : Optional[Any] , __A : int = 8 , __A : float = 0.7_5 ): snake_case__ : Dict = initial_block_size snake_case__ : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 snake_case__ : Optional[Any] = capacity_factor snake_case__ : str = 0 def _lowercase ( self : List[str] , __A : KEY ): return hash(__A ) % len(self._buckets ) def _lowercase ( self : Union[str, Any] , __A : int ): return (ind + 1) % len(self._buckets ) def _lowercase ( self : Optional[Any] , __A : int , __A : KEY , __A : VAL ): snake_case__ : Tuple = self._buckets[ind] if not stored: snake_case__ : List[Any] = _Item(__A , __A ) self._len += 1 return True elif stored.key == key: snake_case__ : List[str] = _Item(__A , __A ) return True else: return False def _lowercase ( self : Dict ): snake_case__ : Optional[Any] = len(self._buckets ) * self._capacity_factor return len(self ) >= int(__A ) def _lowercase ( self : Dict ): if len(self._buckets ) <= self._initial_block_size: return False snake_case__ : Optional[Any] = len(self._buckets ) * self._capacity_factor / 2 return len(self ) < limit def _lowercase ( self : Optional[int] , __A : int ): snake_case__ : Optional[Any] = self._buckets snake_case__ : Optional[int] = [None] * new_size snake_case__ : Any = 0 for item in old_buckets: if item: self._add_item(item.key 
, item.val ) def _lowercase ( self : int ): self._resize(len(self._buckets ) * 2 ) def _lowercase ( self : Union[str, Any] ): self._resize(len(self._buckets ) // 2 ) def _lowercase ( self : int , __A : KEY ): snake_case__ : List[Any] = self._get_bucket_index(__A ) for _ in range(len(self._buckets ) ): yield ind snake_case__ : int = self._get_next_ind(__A ) def _lowercase ( self : Tuple , __A : KEY , __A : VAL ): for ind in self._iterate_buckets(__A ): if self._try_set(__A , __A , __A ): break def __setitem__( self : Union[str, Any] , __A : KEY , __A : VAL ): if self._is_full(): self._size_up() self._add_item(__A , __A ) def __delitem__( self : List[str] , __A : KEY ): for ind in self._iterate_buckets(__A ): snake_case__ : str = self._buckets[ind] if item is None: raise KeyError(__A ) if item is _deleted: continue if item.key == key: snake_case__ : str = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : str , __A : KEY ): for ind in self._iterate_buckets(__A ): snake_case__ : Dict = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key == key: return item.val raise KeyError(__A ) def __len__( self : int ): return self._len def __iter__( self : List[str] ): yield from (item.key for item in self._buckets if item) def __repr__( self : str ): snake_case__ : Union[str, Any] = " ,".join( f'''{item.key}: {item.val}''' for item in self._buckets if item ) return f'''HashMap({val_string})'''
702
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __lowerCamelCase : Tuple = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __lowerCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) snake_case__ : Optional[int] = bs[:] snake_case__ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case_ ) cs.append(2**8 + n ) n += 1 snake_case__ : Dict = [chr(snake_case_ ) for n in cs] return dict(zip(snake_case_ , snake_case_ ) ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): snake_case__ : Dict = set() snake_case__ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case__ : List[Any] = char return pairs class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ): snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding="utf-8" ) as vocab_handle: snake_case__ : Any = json.load(__A ) snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()} snake_case__ : Union[str, Any] = errors # how to handle errors in decoding snake_case__ : Any = bytes_to_unicode() snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding="utf-8" ) as merges_handle: snake_case__ : str = merges_handle.read().split("\n" )[1:-1] snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges] snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) ) snake_case__ : Optional[int] = {} snake_case__ : Any = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _lowercase ( self : List[Any] ): return len(self.encoder ) def _lowercase ( self : Any ): return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self : Optional[Any] , __A : Optional[int] ): if token in self.cache: return self.cache[token] snake_case__ : Union[str, Any] = tuple(__A ) snake_case__ : List[Any] = get_pairs(__A ) if not pairs: return token while True: snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) ) if bigram not in self.bpe_ranks: break snake_case__, snake_case__ : Dict = bigram snake_case__ : str = [] snake_case__ : Union[str, Any] = 0 while i < len(__A ): try: snake_case__ : Dict = word.index(__A , __A ) except 
ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case__ : str = j if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case__ : str = tuple(__A ) snake_case__ : int = new_word if len(__A ) == 1: break else: snake_case__ : List[str] = get_pairs(__A ) snake_case__ : List[Any] = " ".join(__A ) snake_case__ : Optional[int] = word return word def _lowercase ( self : Optional[Any] , __A : Optional[Any] ): snake_case__ : List[str] = [] for token in re.findall(self.pat , __A ): snake_case__ : Dict = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) ) return bpe_tokens def _lowercase ( self : Union[str, Any] , __A : Optional[int] ): return self.encoder.get(__A , self.encoder.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , __A : Optional[Any] ): return self.decoder.get(__A ) def _lowercase ( self : Union[str, Any] , __A : Dict ): snake_case__ : Optional[Any] = "".join(__A ) snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ): if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : List[Any] = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case__ : str = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" ) snake_case__ : str = 0 with open(__A , "w" , encoding="utf-8" ) as 
writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) snake_case__ : int = token_index writer.write(" ".join(__A ) + "\n" ) index += 1 return vocab_file, merge_file def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : Tuple = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ): snake_case__ : Any = [self.sep_token_id] snake_case__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ): snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()): snake_case__ : Optional[int] = " " + text return (text, kwargs) def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ): 
snake_case__ : Optional[Any] = super()._pad( encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , ) # Load from model defaults if return_attention_mask is None: snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A ) if needs_to_be_padded: snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case__ : int = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": snake_case__ : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : str ): return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") ) def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : Tuple = credit_card_number snake_case__ : int = 0 snake_case__ : Optional[Any] = len(snake_case_ ) - 2 for i in range(snake_case_ , -1 , -2 ): # double the value of every second digit snake_case__ : str = int(cc_number[i] ) digit *= 2 # If doubling of a number results in a two digit number # i.e greater than 9(e.g., 6 × 2 = 12), # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6), # to get a single digit number. if digit > 9: digit %= 10 digit += 1 snake_case__ : Union[str, Any] = cc_number[:i] + str(snake_case_ ) + cc_number[i + 1 :] total += digit # Sum up the remaining digits for i in range(len(snake_case_ ) - 1 , -1 , -2 ): total += int(cc_number[i] ) return total % 10 == 0 def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : List[Any] = F'''{credit_card_number} is an invalid credit card number because''' if not credit_card_number.isdigit(): print(F'''{error_message} it has nonnumerical characters.''' ) return False if not 13 <= len(snake_case_ ) <= 16: print(F'''{error_message} of its length.''' ) return False if not validate_initial_digits(snake_case_ ): print(F'''{error_message} of its first two digits.''' ) return False if not luhn_validation(snake_case_ ): print(F'''{error_message} it fails the Luhn check.''' ) return False print(F'''{credit_card_number} is a valid credit card number.''' ) return True if __name__ == "__main__": import doctest doctest.testmod() validate_credit_card_number("""4111111111111111""") validate_credit_card_number("""32323""")
703
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def SCREAMING_SNAKE_CASE ( snake_case_ : str ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any ): from diffusers.utils.testing_utils import pytest_terminal_summary_main snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : int ): if not isinstance(snake_case_ , snake_case_ ): snake_case__ : int = F'''Input value of [number={number}] must be an integer''' raise TypeError(snake_case_ ) if number < 0: return False snake_case__ : List[str] = number * number while number > 0: if number % 10 != number_square % 10: return False number //= 10 number_square //= 10 return True if __name__ == "__main__": import doctest doctest.testmod()
704
def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : Any = [0] * len(snake_case_ ) for i in range(1 , len(snake_case_ ) ): # use last results for better performance - dynamic programming snake_case__ : Union[str, Any] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: snake_case__ : str = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 snake_case__ : int = j return prefix_result def SCREAMING_SNAKE_CASE ( snake_case_ : str ): return max(prefix_function(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod()
25
0
import unittest import numpy as np from diffusers import OnnxStableDiffusionInpaintPipelineLegacy from diffusers.utils.testing_utils import ( is_onnx_available, load_image, load_numpy, nightly, require_onnxruntime, require_torch_gpu, ) if is_onnx_available(): import onnxruntime as ort @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self : int ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self : List[str] ): snake_case__ : Tuple = ort.SessionOptions() snake_case__ : Optional[Any] = False return options def _lowercase ( self : str ): snake_case__ : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) snake_case__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) snake_case__ : Tuple = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy" ) # using the PNDM scheduler by default snake_case__ : str = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) snake_case__ : Any = "A red cat sitting on a park bench" snake_case__ : int = np.random.RandomState(0 ) snake_case__ : Tuple = pipe( prompt=__A , image=__A , mask_image=__A , strength=0.7_5 , guidance_scale=7.5 , num_inference_steps=1_5 , generator=__A , output_type="np" , ) snake_case__ : str = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 1e-2
705
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __lowerCamelCase : Optional[int] = get_logger() __lowerCamelCase : Optional[dict] = None class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ): """simple docstring""" def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ): super().__init__(features=__A ) import jax from jaxlib.xla_client import Device if isinstance(__A , __A ): raise ValueError( f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." 
) snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: snake_case__ : Any = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) snake_case__ : str = str(jax.devices()[0] ) snake_case__ : str = jnp_array_kwargs @staticmethod def _lowercase ( ): import jax return {str(__A ): device for device in jax.devices()} def _lowercase ( self : Optional[Any] , __A : str ): import jax import jax.numpy as jnp if isinstance(__A , __A ) and column: if all( isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(__A , axis=0 ) return column def _lowercase ( self : int , __A : Tuple ): import jax import jax.numpy as jnp if isinstance(__A , (str, bytes, type(__A )) ): return value elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() snake_case__ : Optional[int] = {} if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: snake_case__ : Any = {"dtype": jnp.intaa} else: snake_case__ : Tuple = {"dtype": jnp.intaa} elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): snake_case__ : str = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__A , 
PIL.Image.Image ): snake_case__ : Optional[Any] = np.asarray(__A ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: snake_case__ : int = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} ) def _lowercase ( self : Union[str, Any] , __A : Optional[int] ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__A , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ): snake_case__ : Union[str, Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__A , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] ) elif isinstance(__A , (list, tuple) ): return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] ) return self._tensorize(__A ) def _lowercase ( self : Tuple , __A : dict ): return map_nested(self._recursive_tensorize , __A , map_list=__A ) def _lowercase ( self : Optional[int] , __A : pa.Table ): snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A ) snake_case__ : Tuple = self.python_features_decoder.decode_row(__A ) return self.recursive_tensorize(__A ) def _lowercase ( self : Optional[Any] , __A : pa.Table ): snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A ) snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] ) snake_case__ : List[Any] = 
self.recursive_tensorize(__A ) snake_case__ : Dict = self._consolidate(__A ) return column def _lowercase ( self : str , __A : pa.Table ): snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A ) snake_case__ : int = self.python_features_decoder.decode_batch(__A ) snake_case__ : List[Any] = self.recursive_tensorize(__A ) for column_name in batch: snake_case__ : Any = self._consolidate(batch[column_name] ) return batch
25
0
import unittest import numpy as np import torch from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = DDIMPipeline a_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS a_ = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "latents", "callback", "callback_steps", } a_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS a_ = False def _lowercase ( self : Union[str, Any] ): torch.manual_seed(0 ) snake_case__ : Optional[Any] = UNetaDModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , ) snake_case__ : Dict = DDIMScheduler() snake_case__ : Dict = {"unet": unet, "scheduler": scheduler} return components def _lowercase ( self : Optional[int] , __A : Any , __A : Optional[int]=0 ): if str(__A ).startswith("mps" ): snake_case__ : Union[str, Any] = torch.manual_seed(__A ) else: snake_case__ : Union[str, Any] = torch.Generator(device=__A ).manual_seed(__A ) snake_case__ : Optional[int] = { "batch_size": 1, "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def _lowercase ( self : Dict ): snake_case__ : str = "cpu" snake_case__ : List[str] = self.get_dummy_components() snake_case__ : List[Any] = self.pipeline_class(**__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) snake_case__ : int = self.get_dummy_inputs(__A ) snake_case__ : Any = pipe(**__A ).images snake_case__ : Any = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 3_2, 3_2, 3) ) snake_case__ : 
int = np.array( [1.0_0_0e0_0, 5.7_1_7e-0_1, 4.7_1_7e-0_1, 1.0_0_0e0_0, 0.0_0_0e0_0, 1.0_0_0e0_0, 3.0_0_0e-0_4, 0.0_0_0e0_0, 9.0_0_0e-0_4] ) snake_case__ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__A , 1e-3 ) def _lowercase ( self : List[str] ): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 ) def _lowercase ( self : Union[str, Any] ): super().test_save_load_local(expected_max_difference=3e-3 ) def _lowercase ( self : int ): super().test_save_load_optional_components(expected_max_difference=3e-3 ) def _lowercase ( self : Dict ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : int ): snake_case__ : Any = "google/ddpm-cifar10-32" snake_case__ : Optional[Any] = UNetaDModel.from_pretrained(__A ) snake_case__ : int = DDIMScheduler() snake_case__ : Union[str, Any] = DDIMPipeline(unet=__A , scheduler=__A ) ddim.to(__A ) ddim.set_progress_bar_config(disable=__A ) snake_case__ : Tuple = torch.manual_seed(0 ) snake_case__ : List[Any] = ddim(generator=__A , eta=0.0 , output_type="numpy" ).images snake_case__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) snake_case__ : Tuple = np.array([0.1_7_2_3, 0.1_6_1_7, 0.1_6_0_0, 0.1_6_2_6, 0.1_4_9_7, 0.1_5_1_3, 0.1_5_0_5, 0.1_4_4_2, 0.1_4_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self : List[str] ): snake_case__ : Optional[int] = "google/ddpm-ema-bedroom-256" snake_case__ : List[Any] = UNetaDModel.from_pretrained(__A ) snake_case__ : Union[str, Any] = DDIMScheduler.from_pretrained(__A ) snake_case__ : Tuple = DDIMPipeline(unet=__A , scheduler=__A ) ddpm.to(__A ) ddpm.set_progress_bar_config(disable=__A ) snake_case__ : Any = torch.manual_seed(0 ) snake_case__ : Any = ddpm(generator=__A , output_type="numpy" ).images snake_case__ : Optional[int] 
= image[0, -3:, -3:, -1] assert image.shape == (1, 2_5_6, 2_5_6, 3) snake_case__ : str = np.array([0.0_0_6_0, 0.0_2_0_1, 0.0_3_4_4, 0.0_0_2_4, 0.0_0_1_8, 0.0_0_0_2, 0.0_0_2_2, 0.0_0_0_0, 0.0_0_6_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
706
# Lazy-import shim for the RoBERTa-PreLayerNorm model family: declares which
# public names each submodule exports, then defers the heavy framework imports
# (torch / tf / flax) until a symbol is actually accessed via _LazyModule.
# NOTE(review): variable names were machine-mangled — every assignment targets
# `__lowerCamelCase`, so the per-framework symbol lists and the
# `_import_structure` reference at the bottom no longer line up; restore
# distinct names (presumably `_import_structure`) before use.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)

# Submodule -> list of public names exported from it.
__lowerCamelCase : Tuple = {
    """configuration_roberta_prelayernorm""": [
        """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """RobertaPreLayerNormConfig""",
        """RobertaPreLayerNormOnnxConfig""",
    ],
}

# Register PyTorch symbols only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Tuple = [
        """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """RobertaPreLayerNormForCausalLM""",
        """RobertaPreLayerNormForMaskedLM""",
        """RobertaPreLayerNormForMultipleChoice""",
        """RobertaPreLayerNormForQuestionAnswering""",
        """RobertaPreLayerNormForSequenceClassification""",
        """RobertaPreLayerNormForTokenClassification""",
        """RobertaPreLayerNormModel""",
        """RobertaPreLayerNormPreTrainedModel""",
    ]

# TensorFlow symbols.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Union[str, Any] = [
        """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFRobertaPreLayerNormForCausalLM""",
        """TFRobertaPreLayerNormForMaskedLM""",
        """TFRobertaPreLayerNormForMultipleChoice""",
        """TFRobertaPreLayerNormForQuestionAnswering""",
        """TFRobertaPreLayerNormForSequenceClassification""",
        """TFRobertaPreLayerNormForTokenClassification""",
        """TFRobertaPreLayerNormMainLayer""",
        """TFRobertaPreLayerNormModel""",
        """TFRobertaPreLayerNormPreTrainedModel""",
    ]

# Flax symbols.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[Any] = [
        """FlaxRobertaPreLayerNormForCausalLM""",
        """FlaxRobertaPreLayerNormForMaskedLM""",
        """FlaxRobertaPreLayerNormForMultipleChoice""",
        """FlaxRobertaPreLayerNormForQuestionAnswering""",
        """FlaxRobertaPreLayerNormForSequenceClassification""",
        """FlaxRobertaPreLayerNormForTokenClassification""",
        """FlaxRobertaPreLayerNormModel""",
        """FlaxRobertaPreLayerNormPreTrainedModel""",
    ]

# Static type checkers see the real imports; at runtime a _LazyModule is
# installed instead so nothing heavy is imported eagerly.
if TYPE_CHECKING:
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module object in sys.modules with the lazy proxy.
    # NOTE(review): `_import_structure` is unbound here — the dict above was
    # assigned to `__lowerCamelCase` by the mangling; confirm the original name.
    __lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
0
# Diffusers-style prior transformer: maps CLIP text embeddings + a timestep to
# a predicted CLIP image embedding (as used by unCLIP / Karlo priors).
# NOTE(review): this file is machine-mangled — all locals are `snake_case__`
# and all parameters `__A`, so references such as `num_attention_heads`,
# `timesteps`, `proj_embeddings` below are unbound. The structure documents the
# intended behavior; restore distinct names before executing.
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
    """Output container holding the predicted CLIP image embedding."""

    # predicted_image_embedding field — assumed shape (batch, embedding_dim); TODO confirm
    a_ = 4_2


class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ ):
    """Transformer prior: predicts an image embedding from text embeddings and a timestep."""

    @register_to_config
    def __init__( self : int , __A : int = 3_2 , __A : int = 6_4 , __A : int = 2_0 , __A : int = 7_6_8 , __A : Dict=7_7 , __A : Any=4 , __A : float = 0.0 , __A : str = "silu" , __A : Optional[str] = None , __A : Optional[str] = None , __A : Optional[str] = "linear" , __A : Optional[str] = "prd" , __A : Optional[int] = None , __A : Optional[int] = None , __A : Optional[int] = None , ):
        super().__init__()
        # Attention geometry: inner width = heads * head_dim.
        snake_case__ : Dict = num_attention_heads
        snake_case__ : Any = attention_head_dim
        snake_case__ : Union[str, Any] = num_attention_heads * attention_head_dim
        snake_case__ : Optional[Any] = additional_embeddings
        # Optional overrides fall back to the inner / embedding width.
        snake_case__ : Tuple = time_embed_dim or inner_dim
        snake_case__ : Optional[int] = embedding_proj_dim or embedding_dim
        snake_case__ : Dict = clip_embed_dim or embedding_dim
        # Sinusoidal timestep features followed by a learned MLP embedding.
        snake_case__ : List[Any] = Timesteps(__A , __A , 0 )
        snake_case__ : int = TimestepEmbedding(__A , __A , out_dim=__A , act_fn=__A )
        snake_case__ : List[str] = nn.Linear(__A , __A )
        # Optional LayerNorm applied to the conditioning embedding before projection.
        if embedding_proj_norm_type is None:
            snake_case__ : int = None
        elif embedding_proj_norm_type == "layer":
            snake_case__ : int = nn.LayerNorm(__A )
        else:
            raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
        snake_case__ : List[str] = nn.Linear(__A , __A )
        # Optional projection of encoder hidden states into the inner width.
        if encoder_hid_proj_type is None:
            snake_case__ : str = None
        elif encoder_hid_proj_type == "linear":
            snake_case__ : Optional[Any] = nn.Linear(__A , __A )
        else:
            raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
        # Learned positional embedding covering the token slots plus extra embeddings.
        snake_case__ : Any = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __A ) )
        # Optional learned "prd" token appended to the sequence.
        if added_emb_type == "prd":
            snake_case__ : int = nn.Parameter(torch.zeros(1 , 1 , __A ) )
        elif added_emb_type is None:
            snake_case__ : Tuple = None
        else:
            raise ValueError(
                f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
        # Stack of self-attention transformer blocks.
        snake_case__ : List[str] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    __A , __A , __A , dropout=__A , activation_fn="gelu" , attention_bias=__A , )
                for d in range(__A )
            ] )
        # Optional LayerNorm before the transformer stack.
        if norm_in_type == "layer":
            snake_case__ : Any = nn.LayerNorm(__A )
        elif norm_in_type is None:
            snake_case__ : str = None
        else:
            raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
        snake_case__ : Optional[int] = nn.LayerNorm(__A )
        snake_case__ : Optional[Any] = nn.Linear(__A , __A )
        # Additive causal mask: upper triangle set to a large negative value
        # so attention cannot look ahead.
        snake_case__ : Optional[Any] = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
        causal_attention_mask.triu_(1 )
        snake_case__ : int = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask" , __A , persistent=__A )
        # Statistics used by post-processing to de-normalize predicted latents.
        snake_case__ : Tuple = nn.Parameter(torch.zeros(1 , __A ) )
        snake_case__ : Union[str, Any] = nn.Parameter(torch.zeros(1 , __A ) )

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def _lowercase ( self : int ):
        # Recursively collect every attention processor in the module tree,
        # keyed by its dotted module path.
        snake_case__ : Tuple = {}

        def fn_recursive_add_processors(__A : str , __A : torch.nn.Module , __A : Dict[str, AttentionProcessor] ):
            if hasattr(__A , "set_processor" ):
                snake_case__ : List[str] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f'''{name}.{sub_name}''' , __A , __A )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(__A , __A , __A )
        return processors

    def _lowercase ( self : Any , __A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
        # Install attention processors; a dict must provide one entry per
        # attention layer, a single processor is broadcast to all of them.
        snake_case__ : int = len(self.attn_processors.keys() )
        if isinstance(__A , __A ) and len(__A ) != count:
            raise ValueError(
                f'''A dict of processors was passed, but the number of processors {len(__A )} does not match the''' f''' number of attention layers: {count}.
Please make sure to pass {count} processor classes.''' )

        def fn_recursive_attn_processor(__A : str , __A : torch.nn.Module , __A : Dict ):
            if hasattr(__A , "set_processor" ):
                if not isinstance(__A , __A ):
                    module.set_processor(__A )
                else:
                    module.set_processor(processor.pop(f'''{name}.processor''' ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f'''{name}.{sub_name}''' , __A , __A )

        for name, module in self.named_children():
            fn_recursive_attn_processor(__A , __A , __A )

    def _lowercase ( self : str ):
        # Reset every attention layer to the default processor.
        self.set_attn_processor(AttnProcessor() )

    def _lowercase ( self : str , __A : Any , __A : Union[torch.Tensor, float, int] , __A : torch.FloatTensor , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.BoolTensor] = None , __A : bool = True , ):
        # Forward pass: embed timestep + conditioning, assemble the token
        # sequence, run the transformer stack, and project the final hidden
        # state to a CLIP image embedding.
        snake_case__ : List[str] = hidden_states.shape[0]
        snake_case__ : int = timestep
        # Normalize the timestep to a 1-D tensor of length batch_size.
        if not torch.is_tensor(__A ):
            snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
        elif torch.is_tensor(__A ) and len(timesteps.shape ) == 0:
            snake_case__ : Union[str, Any] = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        snake_case__ : Optional[Any] = timesteps * torch.ones(__A , dtype=timesteps.dtype , device=timesteps.device )
        snake_case__ : Any = self.time_proj(__A )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        snake_case__ : int = timesteps_projected.to(dtype=self.dtype )
        snake_case__ : List[str] = self.time_embedding(__A )
        if self.embedding_proj_norm is not None:
            snake_case__ : List[str] = self.embedding_proj_norm(__A )
        snake_case__ : str = self.embedding_proj(__A )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            snake_case__ : Any = self.encoder_hidden_states_proj(__A )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
        snake_case__ : List[str] = self.proj_in(__A )
        snake_case__ : Union[str, Any] = self.positional_embedding.to(hidden_states.dtype )
        # Build the token sequence: [encoder states?, proj embedding, time
        # embedding, hidden states, prd token?].
        snake_case__ : Optional[int] = []
        snake_case__ : Dict = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(__A )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # Promote 2-D inputs to a length-1 sequence dimension.
        if len(proj_embeddings.shape ) == 2:
            snake_case__ : Tuple = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            snake_case__ : Dict = hidden_states[:, None, :]
        snake_case__ : Any = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            snake_case__ : Any = self.prd_embedding.to(hidden_states.dtype ).expand(__A , -1 , -1 )
            additional_embeds.append(__A )
        snake_case__ : Dict = torch.cat(
            __A , dim=1 , )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        snake_case__ : List[Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            snake_case__ : Union[str, Any] = F.pad(
                __A , (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ) , value=0.0 , )
        snake_case__ : List[Any] = hidden_states + positional_embeddings
        if attention_mask is not None:
            # Convert the boolean mask to additive form, pad for the extra
            # tokens, combine with the causal mask, and repeat per head.
            snake_case__ : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
            snake_case__ : Tuple = F.pad(__A , (0, self.additional_embeddings) , value=0.0 )
            snake_case__ : Dict = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            snake_case__ : List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
        if self.norm_in is not None:
            snake_case__ : Optional[int] = self.norm_in(__A )
        for block in self.transformer_blocks:
            snake_case__ : Union[str, Any] = block(__A , attention_mask=__A )
        snake_case__ : Optional[int] = self.norm_out(__A )
        # Read the prediction from the prd token if present, otherwise from
        # the trailing hidden-state positions.
        if self.prd_embedding is not None:
            snake_case__ : Dict = hidden_states[:, -1]
        else:
            snake_case__ : Optional[int] = hidden_states[:, additional_embeddings_len:]
        snake_case__ : Union[str, Any] = self.proj_to_clip_embeddings(__A )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=__A )

    def _lowercase ( self : Any , __A : Optional[int] ):
        # De-normalize predicted latents back into the CLIP embedding space.
        snake_case__ : List[str] = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
707
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Tuple ): snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) snake_case__ : Tuple = get_activation("gelu" ) self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) ) self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) ) def _lowercase ( self : Dict ): snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) snake_case__ : Union[str, Any] = get_activation("gelu" ) snake_case__ : int = get_activation("gelu_10" ) snake_case__ : Optional[int] = torch_builtin(__A ) snake_case__ : Dict = geluaa(__A ) snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(__A ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _lowercase ( self : str ): get_activation("gelu" ) get_activation("gelu_10" ) get_activation("gelu_fast" ) get_activation("gelu_new" ) get_activation("gelu_python" ) get_activation("gelu_pytorch_tanh" ) get_activation("linear" ) get_activation("mish" ) get_activation("quick_gelu" ) get_activation("relu" ) get_activation("sigmoid" ) get_activation("silu" ) get_activation("swish" ) get_activation("tanh" ) with self.assertRaises(__A ): get_activation("bogus" ) with self.assertRaises(__A ): get_activation(__A ) def _lowercase ( self : List[str] ): snake_case__ : List[str] = get_activation("gelu" ) snake_case__ : Any = 1 snake_case__ : Union[str, Any] = get_activation("gelu" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__A ): snake_case__ : int = acta.a
25
0
# Usage:
# ./gen-card-facebook-wmt19.py
# Generates a README.md model card for each of the four facebook/wmt19-*
# FSMT checkpoints by filling a markdown template with the language pair,
# a sample sentence and BLEU scores.
# NOTE(review): names were machine-mangled — the function body reads
# `model_card_dir`, `src_lang`, `tgt_lang`, and the driver loop below calls
# `write_model_card` / unpacks into `src_lang, tgt_lang`; none of those names
# are bound in this form. Restore distinct names before running.
import os
from pathlib import Path


def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Tuple ):
    # Sample sentence per language, interpolated into the usage example.
    snake_case__ : Tuple = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    snake_case__ : str = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }

    snake_case__ : Tuple = F'''{src_lang}-{tgt_lang}'''

    # Markdown model-card template; string content preserved verbatim.
    snake_case__ : Any = F''' --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). 
The abbreviation FSMT stands for FairSeqMachineTranslation All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR\'s WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) '''
    # Write the rendered card to <model_card_dir>/README.md.
    os.makedirs(snake_case_ , exist_ok=snake_case_ )
    snake_case__ : Any = os.path.join(snake_case_ , "README.md" )
    print(F'''Generating {path}''' )
    with open(snake_case_ , "w" , encoding="utf-8" ) as f:
        f.write(snake_case_ )


# make sure we are under the root of the project
__lowerCamelCase : Optional[int] = Path(__file__).resolve().parent.parent.parent
__lowerCamelCase : List[Any] = repo_dir / """model_cards"""

# Generate one card per released wmt19 checkpoint.
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    __lowerCamelCase : Any = model_name.split("""-""")
    __lowerCamelCase : int = model_cards_dir / """facebook""" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
708
# Conversion script: loads a fairseq UniSpeechSat checkpoint and copies its
# weights into a HuggingFace UniSpeechSatForCTC / UniSpeechSatForPreTraining
# model, renaming parameters according to MAPPING.
# NOTE(review): names were machine-mangled — function bodies reference
# `key`, `hf_pointer`, `fairseq_model`, `MAPPING`, `parser`, `args`,
# `convert_unispeech_sat_checkpoint`, etc., which are unbound in this form;
# restore distinct names before running.
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF parameter-name fragment
# ("*" is later replaced with the encoder layer index).
__lowerCamelCase : int = {
    """post_extract_proj""": """feature_projection.projection""",
    """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
    """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
    """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
    """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
    """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
    """self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
    """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
    """fc2""": """encoder.layers.*.feed_forward.output_dense""",
    """final_layer_norm""": """encoder.layers.*.final_layer_norm""",
    """encoder.layer_norm""": """encoder.layer_norm""",
    """encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
    """w2v_model.layer_norm""": """feature_projection.layer_norm""",
    """quantizer.weight_proj""": """quantizer.weight_proj""",
    """quantizer.vars""": """quantizer.codevectors""",
    """project_q""": """project_q""",
    """final_proj""": """project_hid""",
    """w2v_encoder.proj""": """lm_head""",
    """label_embs_concat""": """label_embeddings_concat""",
    """mask_emb""": """masked_spec_embed""",
    """spk_proj""": """speaker_proj""",
}

# HF keys that live at the top level of the model (not under `unispeech_sat.`).
__lowerCamelCase : Tuple = [
    """lm_head""",
    """quantizer.weight_proj""",
    """quantizer.codevectors""",
    """project_q""",
    """project_hid""",
    """label_embeddings_concat""",
    """speaker_proj""",
    """layer_norm_for_extract""",
]


def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
    # Walk `key` dot-by-dot into the HF module tree, sanity-check the shape,
    # then copy `value` into the matching weight/bias slot.
    for attribute in key.split("." ):
        snake_case__ : int = getattr(snake_case_ , snake_case_ )
    if weight_type is not None:
        snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
    else:
        snake_case__ : List[str] = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    # Dispatch on which tensor of the parameter is being set.
    if weight_type == "weight":
        snake_case__ : str = value
    elif weight_type == "weight_g":
        snake_case__ : Union[str, Any] = value
    elif weight_type == "weight_v":
        snake_case__ : Optional[Any] = value
    elif weight_type == "bias":
        snake_case__ : str = value
    else:
        snake_case__ : Union[str, Any] = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )


def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
    # Iterate over every fairseq tensor and route it either through the
    # conv-feature-extractor loader or the MAPPING-based renamer.
    snake_case__ : str = []
    snake_case__ : Optional[int] = fairseq_model.state_dict()
    snake_case__ : int = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        snake_case__ : Dict = False
        if "conv_layers" in name:
            load_conv_layer(
                snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
            snake_case__ : str = True
        else:
            for key, mapped_key in MAPPING.items():
                snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    snake_case__ : int = True
                    if "*" in mapped_key:
                        # Extract the layer index from the fairseq name.
                        snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
                        snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
                    if "weight_g" in name:
                        snake_case__ : List[Any] = "weight_g"
                    elif "weight_v" in name:
                        snake_case__ : Optional[Any] = "weight_v"
                    elif "bias" in name:
                        snake_case__ : Optional[Any] = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        snake_case__ : Optional[Any] = "weight"
                    else:
                        snake_case__ : Optional[Any] = None
                    set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
                continue
        if not is_used:
            unused_weights.append(snake_case_ )
    logger.warning(F'''Unused weights: {unused_weights}''' )


def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
    # Load one `conv_layers.<layer>.<type>` tensor into the feature extractor,
    # validating shapes for conv weights/biases and layer norms.
    snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
    snake_case__ : Union[str, Any] = name.split("." )
    snake_case__ : str = int(items[0] )
    snake_case__ : str = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            snake_case__ : Any = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            snake_case__ : Any = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' 
{feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            snake_case__ : Optional[Any] = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            snake_case__ : int = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(snake_case_ )


@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
    # Build the target HF model (CTC head when fine-tuned, pre-training head
    # otherwise), load the fairseq ensemble, copy weights, and save.
    if config_path is not None:
        snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
    else:
        snake_case__ : Tuple = UniSpeechSatConfig()
    snake_case__ : str = ""
    if is_finetuned:
        snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
    else:
        snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
    snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    snake_case__ : Tuple = model[0].eval()
    recursively_load_weights(snake_case_ , snake_case_ )
    hf_wavavec.save_pretrained(snake_case_ )


if __name__ == "__main__":
    __lowerCamelCase : int = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to 
convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    __lowerCamelCase : List[Any] = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 10**12 ): snake_case__ : List[Any] = 1 snake_case__ : Any = 0 snake_case__ : Dict = 1 snake_case__ : Dict = 1 while numerator <= 2 * min_total - 1: prev_numerator += 2 * numerator numerator += 2 * prev_numerator prev_denominator += 2 * denominator denominator += 2 * prev_denominator return (denominator + 1) // 2 if __name__ == "__main__": print(f"{solution() = }")
709
# Test module for the M2M100 ("MaMaaa") seq2seq model.
# NOTE(review): identifier obfuscation has destroyed most bindings in this file:
# every module function is named SCREAMING_SNAKE_CASE (later defs shadow earlier
# ones), every class is SCREAMING_SNAKE_CASE__, every method is _lowercase, and
# duplicate parameter names (repeated ``snake_case_`` / ``__A``) are SyntaxErrors.
# References such as ``prepare_mam_aaa_inputs_dict``, ``MaMaaaModelTester``,
# ``_long_tensor``, ``input_ids`` etc. point at the intended original names.
# Code below is kept byte-identical; only comments/docstrings were added.
import copy
import tempfile
import unittest

from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
    from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder


# Builds the kwargs dict for a MaMaaa forward pass, filling in default
# attention/head masks when the caller does not supply them.
# NOTE(review): all eight parameters share one name — SyntaxError as written;
# the bodies read the original names (config, input_ids, decoder_input_ids,
# attention_mask, decoder_attention_mask, head_mask, ...).
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
    if attention_mask is None:
        # non-pad positions attend
        snake_case__ : Any = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        # keep all encoder attention heads by default
        snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
    if decoder_head_mask is None:
        snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
    if cross_attn_head_mask is None:
        snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class SCREAMING_SNAKE_CASE__:
    """Model tester: builds a tiny MaMaaa config plus matching inputs and runs
    the shared encoder/decoder consistency checks (originally ``MaMaaaModelTester``)."""

    # NOTE(review): every method is named _lowercase, so within this class each
    # later def shadows the earlier ones; original names are noted per method.

    # original: __init__ storing the tiny-model hyperparameters
    def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
        snake_case__ : Optional[Any] = parent
        snake_case__ : List[str] = batch_size
        snake_case__ : Union[str, Any] = seq_length
        snake_case__ : Optional[Any] = is_training
        snake_case__ : List[str] = use_labels
        snake_case__ : Tuple = vocab_size
        snake_case__ : Optional[Any] = hidden_size
        snake_case__ : Union[str, Any] = num_hidden_layers
        snake_case__ : List[Any] = num_attention_heads
        snake_case__ : Tuple = intermediate_size
        snake_case__ : str = hidden_act
        snake_case__ : Optional[Any] = hidden_dropout_prob
        snake_case__ : int = attention_probs_dropout_prob
        snake_case__ : int = encoder_layerdrop
        snake_case__ : Tuple = decoder_layerdrop
        snake_case__ : List[str] = max_position_embeddings
        snake_case__ : Tuple = eos_token_id
        snake_case__ : Dict = pad_token_id
        snake_case__ : str = bos_token_id

    # original: prepare_config_and_inputs — random ids + config + kwargs dict
    def _lowercase ( self : Tuple ):
        snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case__ : Union[str, Any] = self.eos_token_id  # Eos Token
        snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
        snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )

        snake_case__ : Union[str, Any] = self.get_config()
        snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
        return config, inputs_dict

    # original: get_config — a tiny MaMaaaConfig mirroring the tester fields
    def _lowercase ( self : Dict ):
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )

    # original: prepare_config_and_inputs_for_common
    def _lowercase ( self : List[str] ):
        snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
        return config, inputs_dict

    # original: create_and_check_decoder_model_past_large_inputs — verifies the
    # decoder cache (past_key_values) yields the same hidden states as a full pass
    def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
        snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
        snake_case__ : List[Any] = inputs_dict["input_ids"]
        snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
        snake_case__ : Union[str, Any] = inputs_dict["head_mask"]

        # first forward pass
        snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )

        snake_case__, snake_case__ : Dict = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )

        # append to next input_ids and
        snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )

        snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
        snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
            "last_hidden_state"
        ]

        # select random slice
        snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
        snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )

    # original: check_encoder_decoder_model_standalone — encoder/decoder must
    # round-trip through save_pretrained/from_pretrained unchanged
    def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
        snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
        snake_case__ : Union[str, Any] = model(**__A )

        snake_case__ : Tuple = outputs.encoder_last_hidden_state
        snake_case__ : Union[str, Any] = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ : Dict = model.get_encoder()
            encoder.save_pretrained(__A )
            snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )

        snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )

        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case__ : Dict = model.get_decoder()
            decoder.save_pretrained(__A )
            snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )

        snake_case__ : List[str] = decoder(
            input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]

        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )


@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
    """Common + generation + pipeline test suite for MaMaaa (originally
    ``MaMaaaModelTest``). NOTE(review): the three ``UpperCamelCase_`` bases are
    obfuscated — presumably ModelTesterMixin, GenerationTesterMixin,
    PipelineTesterMixin; the repeated ``a_`` class attributes were originally
    all_model_classes, all_generative_model_classes, pipeline_model_mapping,
    is_encoder_decoder, fx_compatible, test_pruning, test_missing_keys."""

    a_ = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    a_ = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    a_ = True
    a_ = True
    a_ = False
    a_ = False

    # original: is_pipeline_test_to_skip — translation pipeline cannot build a
    # simple tokenizer for M2M100
    def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    # original: setUp
    def _lowercase ( self : Tuple ):
        snake_case__ : Any = MaMaaaModelTester(self )
        snake_case__ : Dict = ConfigTester(self , config_class=__A )

    # original: test_config
    def _lowercase ( self : Optional[Any] ):
        self.config_tester.run_common_tests()

    # original: test_save_load_strict — reload must report no missing keys
    def _lowercase ( self : Union[str, Any] ):
        snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            snake_case__ : int = model_class(__A )

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(__A )
                snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
            self.assertEqual(info["missing_keys"] , [] )

    # original: test_decoder_model_past_with_large_inputs
    def _lowercase ( self : Dict ):
        snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )

    # original: test_encoder_decoder_model_standalone
    def _lowercase ( self : Any ):
        snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*__A )

    # original: test_inputs_embeds — forward pass with embeddings instead of ids
    def _lowercase ( self : Union[str, Any] ):
        snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            snake_case__ : str = model_class(__A )
            model.to(__A )
            model.eval()

            snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )

            if not self.is_encoder_decoder:
                snake_case__ : Optional[Any] = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                snake_case__ : Union[str, Any] = inputs["input_ids"]
                snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids" , __A )

            snake_case__ : Tuple = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                snake_case__ : List[Any] = wte(__A )
            else:
                snake_case__ : Any = wte(__A )
                snake_case__ : Optional[int] = wte(__A )

            with torch.no_grad():
                model(**__A )[0]

    # original: test_generate_fp16 — half precision generation must not crash
    def _lowercase ( self : Optional[Any] ):
        snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        snake_case__ : Any = input_dict["input_ids"]
        snake_case__ : int = input_ids.ne(1 ).to(__A )
        snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
        if torch_device == "cuda":
            model.half()
        model.generate(__A , attention_mask=__A )
        model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )


# original: _long_tensor — wrap nested int lists as a long tensor on torch_device
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
    return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )


# original: TOLERANCE — absolute tolerance for the integration checks below
__lowerCamelCase : Optional[Any] = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests against the pretrained facebook/m2m100_418M
    checkpoint (originally ``MaMaaaModelIntegrationTests``)."""

    # original: default_tokenizer
    @cached_property
    def _lowercase ( self : str ):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )

    # original: test_inference_no_head — base model hidden states vs reference
    def _lowercase ( self : Optional[int] ):
        snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
        snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
        with torch.no_grad():
            snake_case__ : str = model(**__A )[0]
        snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
        self.assertEqual(output.shape , __A )
        # change to expected output here
        snake_case__ : Optional[Any] = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )

    # original: test_inference_head — LM-head logits vs reference
    def _lowercase ( self : Union[str, Any] ):
        snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )

        # change to intended input
        snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
        with torch.no_grad():
            snake_case__ : Union[str, Any] = model(**__A )[0]
        snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
        self.assertEqual(output.shape , __A )
        # change to expected output here
        snake_case__ : List[str] = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )

    # original: test_seq_to_seq_generation — fr->en beam-search translation
    def _lowercase ( self : Optional[Any] ):
        snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
        snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )

        snake_case__ : List[Any] = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
        snake_case__ : Tuple = model.generate(
            input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )

        snake_case__ : List[str] = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        snake_case__ : Dict = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
        assert generated == expected_en
25
0
# Image processor module (resize -> center-crop -> rescale -> normalize
# pipeline in the standard transformers BaseImageProcessor shape).
# NOTE(review): obfuscation broke several bindings — the base class
# ``UpperCamelCase_`` is undefined (presumably BaseImageProcessor), ``a_`` was
# ``model_input_names``, all methods are named ``_lowercase`` (later defs
# shadow earlier ones; originals: resize / center_crop / rescale / normalize /
# preprocess), and repeated ``__A`` parameter names are SyntaxErrors as written.
# Code kept byte-identical; only comments/docstrings added.
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


# module logger (originally named ``logger``)
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)


class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
    """Image processor applying optional resize, center-crop, rescale and
    normalize steps and returning a ``BatchFeature`` of pixel values."""

    a_ = ["pixel_values"]

    # Stores the default preprocessing configuration; defaults: shortest edge
    # 256 for resize, 224x224 center crop, 1/255 rescale, ImageNet mean/std.
    def __init__( self : Tuple , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 2_5_5 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : List[str] , ):
        super().__init__(**__A )
        snake_case__ : List[Any] = size if size is not None else {"shortest_edge": 2_5_6}
        snake_case__ : List[str] = get_size_dict(__A , default_to_square=__A )
        snake_case__ : str = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
        snake_case__ : Optional[int] = get_size_dict(__A )
        snake_case__ : Dict = do_resize
        snake_case__ : Union[str, Any] = size
        snake_case__ : Optional[int] = resample
        snake_case__ : Dict = do_center_crop
        snake_case__ : int = crop_size
        snake_case__ : str = do_rescale
        snake_case__ : Optional[Any] = rescale_factor
        snake_case__ : Dict = do_normalize
        snake_case__ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        snake_case__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD

    # original: resize — scale so the shortest edge matches size["shortest_edge"]
    def _lowercase ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : int , ):
        snake_case__ : Tuple = get_size_dict(__A , default_to_square=__A )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        snake_case__ : Dict = get_resize_output_image_size(__A , size=size["shortest_edge"] , default_to_square=__A )
        return resize(__A , size=__A , resample=__A , data_format=__A , **__A )

    # original: center_crop — crop to size["height"] x size["width"]
    def _lowercase ( self : str , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
        snake_case__ : Dict = get_size_dict(__A )
        return center_crop(__A , size=(size["height"], size["width"]) , data_format=__A , **__A )

    # original: rescale — multiply pixel values by ``scale``
    def _lowercase ( self : Dict , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str ):
        return rescale(__A , scale=__A , data_format=__A , **__A )

    # original: normalize — per-channel (x - mean) / std
    def _lowercase ( self : Tuple , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[Any] , ):
        return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )

    # original: preprocess — validate arguments, then run the enabled steps on
    # every image and wrap the results in a BatchFeature
    def _lowercase ( self : Tuple , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
        # Per-call arguments override the instance-level defaults.
        snake_case__ : Any = do_resize if do_resize is not None else self.do_resize
        snake_case__ : Union[str, Any] = size if size is not None else self.size
        snake_case__ : Optional[Any] = get_size_dict(__A , default_to_square=__A )
        snake_case__ : Dict = resample if resample is not None else self.resample
        snake_case__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
        snake_case__ : List[str] = crop_size if crop_size is not None else self.crop_size
        snake_case__ : Optional[int] = get_size_dict(__A )
        snake_case__ : int = do_rescale if do_rescale is not None else self.do_rescale
        snake_case__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        snake_case__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
        snake_case__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
        snake_case__ : List[str] = image_std if image_std is not None else self.image_std
        snake_case__ : Optional[Any] = make_list_of_images(__A )

        if not valid_images(__A ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # All transformations expect numpy arrays.
        snake_case__ : Union[str, Any] = [to_numpy_array(__A ) for image in images]

        if do_resize:
            snake_case__ : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]

        if do_center_crop:
            snake_case__ : Dict = [self.center_crop(image=__A , size=__A ) for image in images]

        if do_rescale:
            snake_case__ : str = [self.rescale(image=__A , scale=__A ) for image in images]

        if do_normalize:
            snake_case__ : Tuple = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]

        snake_case__ : str = [to_channel_dimension_format(__A , __A ) for image in images]

        snake_case__ : List[Any] = {"pixel_values": images}
        return BatchFeature(data=__A , tensor_type=__A )
710
# Tests for the ``datasets`` Spark packaged module (partitioning, iteration
# order, shuffling and worker sharding of a Spark DataFrame).
# NOTE(review): obfuscation broke the bindings — every function is named
# SCREAMING_SNAKE_CASE (later defs shadow earlier ones), the helper's two
# parameters share one name (SyntaxError as written), and bodies read the
# original names (df, partition_order, spark, spark_builder, it, ...).
# The typing names used in annotations (List, Any, Union, ...) are not
# imported here either. Code kept byte-identical; only comments added.
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


# original: _get_expected_row_ids_and_row_dicts_for_partition_order — collect
# the rows of ``df`` partition by partition in the requested order, producing
# ("<part_id>_<row_idx>", row-dict) pairs to compare iteration results against.
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
    snake_case__ : Optional[int] = []
    for part_id in partition_order:
        # filter to the physical Spark partition with this id
        snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(snake_case_ ):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts


# original: test_repartition_df_if_needed — 100 rows of int64 with a 16-byte
# shard budget should repartition to 50 partitions of 2 rows each.
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
    snake_case__ : Any = Spark(snake_case_ )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


# original: test_generate_iterable_examples — iterating with a custom
# partition order must match the per-partition expectation.
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
    snake_case__ : Optional[Any] = [1, 0]
    snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ )  # Reverse the partitions.
    snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )

    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


# original: test_spark_examples_iterable — single partition yields rows in
# order with ids "0_<i>".
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
    snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(snake_case_ ):
        assert row_id == F'''0_{i}'''
        assert row_dict == {"id": i}


# original: test_spark_examples_iterable_shuffle — a mocked RNG that reverses
# the partition permutation must produce order [2, 1, 0].
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    snake_case__ : str = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator" ) as generator_mock:
        snake_case__ : Union[str, Any] = lambda snake_case_ : x.reverse()
        snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
        snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(snake_case_ ):
            snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


# original: test_spark_examples_iterable_shard — with 4 partitions and 2
# workers, worker 0 reads partitions [0, 2] and worker 1 reads [1, 3].
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    snake_case__ : Tuple = spark.range(20 ).repartition(4 )

    # Partitions 0 and 2
    snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
    for i, (row_id, row_dict) in enumerate(snake_case_ ):
        snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_a.n_shards == 2
    snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
    for i, (row_id, row_dict) in enumerate(snake_case_ ):
        snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


# original: test_repartition_df_if_needed_max_num_df_rows — the partition
# count must never exceed the row count even with a tiny shard budget.
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
    snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    snake_case__ : Tuple = spark.range(100 ).repartition(1 )
    snake_case__ : Union[str, Any] = Spark(snake_case_ )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
25
0
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    """Split a scikit-learn dataset "bunch" dict into (features, targets).

    Fix: the original def was obfuscated to a name its caller never used and
    read an unbound name ``data`` instead of its parameter.

    >>> data_handling({"data": [[5.1, 3.5]], "target": [0]})
    ([[5.1, 3.5]], [0])
    """
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit and return an XGBoost classifier on the given training data.

    Fix: the original def declared two parameters with the same name (a
    SyntaxError) and read an unbound local ``classifier``.
    """
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Train an XGBoost classifier on the iris dataset and display its
    normalized confusion matrix on the held-out test split."""
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
711
# Lazy-import ``__init__`` for the XLNet model family: the import structure is
# declared up front, optional backends (sentencepiece / tokenizers / torch /
# tf) are probed with try/except, and at runtime the module is replaced by a
# _LazyModule so heavy backends load on first attribute access only.
# NOTE(review): obfuscation collapsed several distinct names into
# ``__lowerCamelCase`` — originally ``_import_structure`` plus per-backend
# list assignments such as ``_import_structure["tokenization_xlnet"] = [...]``.
# As written, each assignment overwrites the previous one and the final
# ``_LazyModule(...)`` call reads an undefined ``_import_structure``.
# Code kept byte-identical; only comments added.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# base import structure: the config is always importable
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}

# slow tokenizer requires sentencepiece
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : str = ["""XLNetTokenizer"""]

# fast tokenizer requires the tokenizers library
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]

# PyTorch modeling classes
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : str = [
        """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XLNetForMultipleChoice""",
        """XLNetForQuestionAnswering""",
        """XLNetForQuestionAnsweringSimple""",
        """XLNetForSequenceClassification""",
        """XLNetForTokenClassification""",
        """XLNetLMHeadModel""",
        """XLNetModel""",
        """XLNetPreTrainedModel""",
        """load_tf_weights_in_xlnet""",
    ]

# TensorFlow modeling classes
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Dict = [
        """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXLNetForMultipleChoice""",
        """TFXLNetForQuestionAnsweringSimple""",
        """TFXLNetForSequenceClassification""",
        """TFXLNetForTokenClassification""",
        """TFXLNetLMHeadModel""",
        """TFXLNetMainLayer""",
        """TFXLNetModel""",
        """TFXLNetPreTrainedModel""",
    ]


# Under static type checking, perform the real imports so IDEs/type checkers
# see the concrete symbols; the same backend guards are repeated here.
if TYPE_CHECKING:
    from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet import XLNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlnet_fast import XLNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlnet import (
            XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLNetForMultipleChoice,
            XLNetForQuestionAnswering,
            XLNetForQuestionAnsweringSimple,
            XLNetForSequenceClassification,
            XLNetForTokenClassification,
            XLNetLMHeadModel,
            XLNetModel,
            XLNetPreTrainedModel,
            load_tf_weights_in_xlnet,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlnet import (
            TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLNetForMultipleChoice,
            TFXLNetForQuestionAnsweringSimple,
            TFXLNetForSequenceClassification,
            TFXLNetForTokenClassification,
            TFXLNetLMHeadModel,
            TFXLNetMainLayer,
            TFXLNetModel,
            TFXLNetPreTrainedModel,
        )

else:
    import sys

    # At runtime, swap this module for a lazy proxy.
    # NOTE(review): ``_import_structure`` is undefined here as written — it was
    # presumably the dict obfuscated into ``__lowerCamelCase`` above.
    __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
0
import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() __lowerCamelCase : Any = logging.get_logger("""transformers.models.speecht5""") __lowerCamelCase : List[Any] = { """speech_encoder_prenet.layer_norm""": """speecht5.encoder.prenet.feature_projection.layer_norm""", """speech_encoder_prenet.post_extract_proj""": """speecht5.encoder.prenet.feature_projection.projection""", """speech_encoder_prenet.pos_conv.0""": """speecht5.encoder.prenet.pos_conv_embed.conv""", """speech_encoder_prenet.mask_emb""": """speecht5.encoder.prenet.masked_spec_embed""", } __lowerCamelCase : List[str] = { """text_encoder_prenet.encoder_prenet.0""": """speecht5.encoder.prenet.embed_tokens""", """text_encoder_prenet.encoder_prenet.1.alpha""": """speecht5.encoder.prenet.encode_positions.alpha""", } __lowerCamelCase : List[Any] = { """speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0""": """speecht5.decoder.prenet.layers.0""", """speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0""": """speecht5.decoder.prenet.layers.1""", """speech_decoder_prenet.decoder_prenet.0.1""": """speecht5.decoder.prenet.final_layer""", """speech_decoder_prenet.decoder_prenet.1.alpha""": """speecht5.decoder.prenet.encode_positions.alpha""", """speech_decoder_prenet.spkembs_layer.0""": """speecht5.decoder.prenet.speaker_embeds_layer""", } __lowerCamelCase : Any = { """speech_decoder_postnet.feat_out""": """speech_decoder_postnet.feat_out""", """speech_decoder_postnet.prob_out""": """speech_decoder_postnet.prob_out""", """speech_decoder_postnet.postnet.postnet.0.0""": """speech_decoder_postnet.layers.0.conv""", """speech_decoder_postnet.postnet.postnet.0.1""": """speech_decoder_postnet.layers.0.batch_norm""", """speech_decoder_postnet.postnet.postnet.1.0""": 
"""speech_decoder_postnet.layers.1.conv""", """speech_decoder_postnet.postnet.postnet.1.1""": """speech_decoder_postnet.layers.1.batch_norm""", """speech_decoder_postnet.postnet.postnet.2.0""": """speech_decoder_postnet.layers.2.conv""", """speech_decoder_postnet.postnet.postnet.2.1""": """speech_decoder_postnet.layers.2.batch_norm""", """speech_decoder_postnet.postnet.postnet.3.0""": """speech_decoder_postnet.layers.3.conv""", """speech_decoder_postnet.postnet.postnet.3.1""": """speech_decoder_postnet.layers.3.batch_norm""", """speech_decoder_postnet.postnet.postnet.4.0""": """speech_decoder_postnet.layers.4.conv""", """speech_decoder_postnet.postnet.postnet.4.1""": """speech_decoder_postnet.layers.4.batch_norm""", } __lowerCamelCase : str = { """text_decoder_prenet.embed_tokens""": """speecht5.decoder.prenet.embed_tokens""", } __lowerCamelCase : str = { """text_decoder_postnet.output_projection""": """text_decoder_postnet.lm_head""", } __lowerCamelCase : Optional[int] = { """encoder.layers.*.self_attn.k_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj""", """encoder.layers.*.self_attn.v_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj""", """encoder.layers.*.self_attn.q_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj""", """encoder.layers.*.self_attn.out_proj""": """speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj""", """encoder.layers.*.self_attn_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.layer_norm""", """encoder.layers.*.fc1""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense""", """encoder.layers.*.fc2""": """speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense""", """encoder.layers.*.final_layer_norm""": """speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """speecht5.encoder.wrapped_encoder.layer_norm""", """encoder.pos_emb.pe_k""": 
"""speecht5.encoder.wrapped_encoder.embed_positions.pe_k""", } __lowerCamelCase : List[str] = { """decoder.layers.*.self_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj""", """decoder.layers.*.self_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj""", """decoder.layers.*.self_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj""", """decoder.layers.*.self_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj""", """decoder.layers.*.self_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm""", """decoder.layers.*.encoder_attn.k_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj""", """decoder.layers.*.encoder_attn.v_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj""", """decoder.layers.*.encoder_attn.q_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj""", """decoder.layers.*.encoder_attn.out_proj""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj""", """decoder.layers.*.encoder_attn_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm""", """decoder.layers.*.fc1""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense""", """decoder.layers.*.fc2""": """speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense""", """decoder.layers.*.final_layer_norm""": """speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm""", } __lowerCamelCase : Optional[int] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } __lowerCamelCase : List[str] = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __lowerCamelCase : Tuple = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, 
**MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } __lowerCamelCase : Dict = [] __lowerCamelCase : Dict = [ """encoder.version""", """encoder.layers.*.norm_k.weight""", """encoder.layers.*.norm_k.bias""", """decoder.version""", """decoder.layers.*.norm_k.weight""", """decoder.layers.*.norm_k.bias""", """decoder.pos_emb.pe_k""", """speech_encoder_prenet.embed_positions._float_tensor""", """text_decoder_prenet.embed_positions._float_tensor""", ] __lowerCamelCase : Any = IGNORE_KEYS + [ """encoder.proj""", """text_encoder_prenet.*""", """speech_decoder_prenet.*""", """speech_decoder_postnet.*""", ] __lowerCamelCase : Dict = IGNORE_KEYS + [ """encoder.proj""", """speech_encoder_prenet.*""", """text_decoder_prenet.*""", """text_decoder_postnet.*""", ] __lowerCamelCase : Union[str, Any] = IGNORE_KEYS + [ """encoder.proj""", """text_encoder_prenet.*""", """text_decoder_prenet.*""", """text_decoder_postnet.*""", ] def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : List[Any] ): for attribute in key.split("." ): snake_case__ : Union[str, Any] = getattr(snake_case_ , snake_case_ ) if weight_type is not None: snake_case__ : Union[str, Any] = getattr(snake_case_ , snake_case_ ).shape else: snake_case__ : Tuple = hf_pointer.shape if hf_shape != value.shape: raise ValueError( F'''Shape of hf {key + '.' 
+ weight_type if weight_type is not None else ''} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": snake_case__ : Dict = value elif weight_type == "weight_g": snake_case__ : List[Any] = value elif weight_type == "weight_v": snake_case__ : int = value elif weight_type == "bias": snake_case__ : List[Any] = value elif weight_type == "running_mean": snake_case__ : Optional[Any] = value elif weight_type == "running_var": snake_case__ : Dict = value elif weight_type == "num_batches_tracked": snake_case__ : Optional[int] = value else: snake_case__ : Tuple = value logger.info(F'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' ) def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] ): for key in ignore_keys: if key.endswith(".*" ): if name.startswith(key[:-1] ): return True elif ".*." in key: snake_case__ : Optional[Any] = key.split(".*." ) if prefix in name and suffix in name: return True elif key in name: return True return False def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Dict , snake_case_ : Dict ): snake_case__ : Optional[Any] = [] if task == "s2t": snake_case__ : Union[str, Any] = hf_model.speechta.encoder.prenet.feature_encoder snake_case__ : int = MAPPING_S2T snake_case__ : List[str] = IGNORE_KEYS_S2T elif task == "t2s": snake_case__ : str = None snake_case__ : Optional[Any] = MAPPING_T2S snake_case__ : Tuple = IGNORE_KEYS_T2S elif task == "s2s": snake_case__ : List[Any] = hf_model.speechta.encoder.prenet.feature_encoder snake_case__ : Any = MAPPING_S2S snake_case__ : Tuple = IGNORE_KEYS_S2S else: raise ValueError(F'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(snake_case_ , snake_case_ ): logger.info(F'''{name} was ignored''' ) continue snake_case__ : Any = False if "conv_layers" in name: load_conv_layer( snake_case_ , snake_case_ , snake_case_ , snake_case_ , 
hf_model.config.feat_extract_norm == "group" , ) snake_case__ : Dict = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: snake_case__ : Tuple = key.split(".*." ) if prefix in name and suffix in name: snake_case__ : Union[str, Any] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: snake_case__ : Optional[Any] = True if "*" in mapped_key: snake_case__ : Dict = name.split(snake_case_ )[0].split("." )[-2] snake_case__ : List[Any] = mapped_key.replace("*" , snake_case_ ) if "weight_g" in name: snake_case__ : Any = "weight_g" elif "weight_v" in name: snake_case__ : Optional[int] = "weight_v" elif "bias" in name: snake_case__ : List[str] = "bias" elif "weight" in name: snake_case__ : Tuple = "weight" elif "running_mean" in name: snake_case__ : Any = "running_mean" elif "running_var" in name: snake_case__ : Dict = "running_var" elif "num_batches_tracked" in name: snake_case__ : Optional[int] = "num_batches_tracked" else: snake_case__ : List[str] = None set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ) continue if not is_used: unused_weights.append(snake_case_ ) logger.warning(F'''Unused weights: {unused_weights}''' ) def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : int ): snake_case__ : Optional[int] = full_name.split("conv_layers." )[-1] snake_case__ : Optional[Any] = name.split("." 
) snake_case__ : Optional[Any] = int(items[0] ) snake_case__ : int = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) snake_case__ : Any = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) snake_case__ : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) snake_case__ : str = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) snake_case__ : Dict = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(snake_case_ ) @torch.no_grad() def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : int=None , snake_case_ : str=None , snake_case_ : int=None , ): if config_path is not 
None: snake_case__ : List[Any] = SpeechTaConfig.from_pretrained(snake_case_ ) else: snake_case__ : List[Any] = SpeechTaConfig() if task == "s2t": snake_case__ : List[Any] = config.max_text_positions snake_case__ : int = SpeechTaForSpeechToText(snake_case_ ) elif task == "t2s": snake_case__ : List[str] = 1876 snake_case__ : List[Any] = 600 snake_case__ : List[Any] = config.max_speech_positions snake_case__ : Any = SpeechTaForTextToSpeech(snake_case_ ) elif task == "s2s": snake_case__ : Tuple = 1876 snake_case__ : int = config.max_speech_positions snake_case__ : Optional[int] = SpeechTaForSpeechToSpeech(snake_case_ ) else: raise ValueError(F'''Unknown task name: {task}''' ) if vocab_path: snake_case__ : Any = SpeechTaTokenizer(snake_case_ , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it snake_case__ : List[str] = AddedToken("<mask>" , lstrip=snake_case_ , rstrip=snake_case_ ) snake_case__ : List[Any] = mask_token tokenizer.add_special_tokens({"mask_token": mask_token} ) tokenizer.add_tokens(["<ctc_blank>"] ) snake_case__ : Any = SpeechTaFeatureExtractor() snake_case__ : Union[str, Any] = SpeechTaProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ ) processor.save_pretrained(snake_case_ ) snake_case__ : str = torch.load(snake_case_ ) recursively_load_weights(fairseq_checkpoint["model"] , snake_case_ , snake_case_ ) model.save_pretrained(snake_case_ ) if repo_id: print("Pushing to the hub..." ) processor.push_to_hub(snake_case_ ) model.push_to_hub(snake_case_ ) if __name__ == "__main__": __lowerCamelCase : Tuple = argparse.ArgumentParser() parser.add_argument( """--task""", default="""s2t""", type=str, help="""Type of the SpeechT5 model you'd like to convert. 
Should be one of 's2t', 't2s', 's2s'.""", ) parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--vocab_path""", default=None, type=str, help="""Path to SentencePiece model""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") parser.add_argument( """--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub.""" ) __lowerCamelCase : int = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
712
# Pipeline entry point for Kandinsky: import the real pipelines when both
# `transformers` and `torch` are installed, otherwise expose dummy objects
# that raise an informative error on instantiation.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Required backends are missing: fall back to placeholder classes.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
25
0
# Lazy-import module for Falcon.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Submodule name -> exported public names; extended below when torch is present.
_import_structure = {
    "configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_falcon"] = [
        "FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FalconForCausalLM",
        "FalconModel",
        "FalconPreTrainedModel",
        "FalconForSequenceClassification",
        "FalconForTokenClassification",
        "FalconForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_falcon import (
            FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
            FalconForCausalLM,
            FalconForQuestionAnswering,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconModel,
            FalconPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
713
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    """Split a scikit-learn "bunch" dict into a (features, targets) pair."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit an XGBoost classifier (default hyperparameters) on the given data."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Train on the IRIS dataset and display a normalized confusion matrix.

    Uses a 75/25 train/test split; the confusion matrix is computed on the
    held-out test portion.
    """
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier on the test set
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
25
0
# Lazy-import module for ViT-MSN.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Submodule name -> exported public names; extended below when torch is present.
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
714
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]) -> None:
    """Compute WER/CER over *result*, print them, write them to a results
    file, and (when --log_outputs is set) dump predictions/targets to logs."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:

            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Lower-case *text* and strip the characters that were ignored during
    training, then collapse newline/multi-space sequences to single spaces."""
    # IMPORTANT: this should correspond to the chars ignored during training
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, normalize the target text, e.g. removing new-line characters.
    # NOTE(review): the mangled source collapsed runs of spaces here; the
    # intended sequences are three then two spaces — confirm against training.
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args: argparse.Namespace) -> None:
    """Run the evaluation end to end: load data, resample, transcribe, score."""
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio to the rate the model was trained with
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
25
0
def binary_multiply(a: int, b: int) -> int:
    """Multiply ``a`` by ``b`` with the shift-and-add (Russian peasant) method.

    Equivalent to ``a * b`` for non-negative ``b``; runs in O(log b) additions.
    """
    res = 0
    while b > 0:
        if b & 1:  # low bit of b set -> this power-of-two multiple contributes
            res += a
        a += a  # double a: now holds a * 2**(bit position)
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Return ``(a * b) % modulus`` via shift-and-add, keeping the running
    sum reduced so intermediate values stay small."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
715
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    """Task template describing the column layout of a text-classification dataset."""

    # `task` is kept in asdict() output even at its default so it survives
    # JSON round-trips of the dataset metadata.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        """Return a copy of this template whose label schema uses the
        ClassLabel feature found in *features*.

        Raises:
            ValueError: if the label column is missing or is not a ClassLabel.
        """
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # frozen dataclass: bypass __setattr__ via __dict__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Map the dataset's actual column names to the canonical task names.
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
25
0
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) using the first *accuracy* terms of the
    Maclaurin series sum((-1)^r * theta^(2r+1) / (2r+1)!).

    Raises:
        ValueError: if theta is not an int/float or accuracy is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) using the first *accuracy* terms of the
    Maclaurin series sum((-1)^r * theta^(2r) / (2r)!).

    Raises:
        ValueError: if theta is not an int/float or accuracy is not a
            positive int.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    # Reduce theta into [0, 2*pi) so the truncated series stays accurate.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
716
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES from ...utils import logging from ..auto import CONFIG_MAPPING __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Dict = { """Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""", } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip_vision_model" def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ): super().__init__(**__A ) snake_case__ : List[str] = hidden_size snake_case__ : Optional[int] = intermediate_size snake_case__ : List[str] = num_hidden_layers snake_case__ : List[Any] = num_attention_heads snake_case__ : str = patch_size snake_case__ : int = image_size snake_case__ : int = initializer_range snake_case__ : Optional[int] = attention_dropout snake_case__ : str = layer_norm_eps snake_case__ : Optional[Any] = hidden_act snake_case__ : Tuple = qkv_bias @classmethod def _lowercase ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ): cls._set_token_in_kwargs(__A ) snake_case__, snake_case__ : str = cls.get_config_dict(__A , **__A ) # get the vision config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": snake_case__ : Union[str, Any] = config_dict["vision_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip_qformer" def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ): super().__init__(pad_token_id=__A , **__A ) snake_case__ : Dict = vocab_size snake_case__ : Optional[int] = hidden_size snake_case__ : Optional[Any] = num_hidden_layers snake_case__ : str = num_attention_heads snake_case__ : int = hidden_act snake_case__ : Optional[Any] = intermediate_size snake_case__ : Union[str, Any] = hidden_dropout_prob snake_case__ : List[Any] = attention_probs_dropout_prob snake_case__ : List[Any] = max_position_embeddings snake_case__ : int = initializer_range snake_case__ : Dict = layer_norm_eps snake_case__ : str = position_embedding_type snake_case__ : Dict = cross_attention_frequency snake_case__ : List[str] = encoder_hidden_size @classmethod def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ): cls._set_token_in_kwargs(__A ) snake_case__, snake_case__ : Tuple = cls.get_config_dict(__A , **__A ) # get the qformer config dict if we are loading from InstructBlipConfig if config_dict.get("model_type" ) == "instructblip": snake_case__ : List[Any] = config_dict["qformer_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "instructblip" a_ = True def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ): super().__init__(**__A ) if vision_config is None: snake_case__ : Any = {} logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." ) if qformer_config is None: snake_case__ : Optional[Any] = {} logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." ) if text_config is None: snake_case__ : Optional[int] = {} logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." ) snake_case__ : List[Any] = InstructBlipVisionConfig(**__A ) snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A ) snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt" snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A ) snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings snake_case__ : Tuple = self.text_config.is_encoder_decoder snake_case__ : str = num_query_tokens snake_case__ : Dict = self.vision_config.hidden_size snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES snake_case__ : int = 1.0 snake_case__ : Optional[int] = 0.0_2 @classmethod def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ): return cls( vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , ) def _lowercase ( self : Optional[int] ): snake_case__ : Any = copy.deepcopy(self.__dict__ ) snake_case__ : Optional[Any] = self.vision_config.to_dict() snake_case__ : 
List[str] = self.qformer_config.to_dict() snake_case__ : List[Any] = self.text_config.to_dict() snake_case__ : List[Any] = self.__class__.model_type return output
25
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCamelCase : Optional[int] = logging.get_logger(__name__) __lowerCamelCase : Dict = { """BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""", """BridgeTower/bridgetower-base-itm-mlm""": ( """https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json""" ), } class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "bridgetower_vision_model" def __init__( self : Dict , __A : Optional[int]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=3 , __A : Optional[Any]=1_6 , __A : Any=2_8_8 , __A : str=1 , __A : Any=1e-0_5 , __A : Optional[int]=False , __A : Optional[Any]=True , __A : List[str]=False , **__A : Union[str, Any] , ): super().__init__(**__A ) snake_case__ : Any = hidden_size snake_case__ : List[Any] = num_hidden_layers snake_case__ : Any = num_channels snake_case__ : str = patch_size snake_case__ : Dict = image_size snake_case__ : Union[str, Any] = initializer_factor snake_case__ : Dict = layer_norm_eps snake_case__ : Tuple = stop_gradient snake_case__ : Any = share_layernorm snake_case__ : Tuple = remove_last_layer @classmethod def _lowercase ( cls : Dict , __A : Union[str, os.PathLike] , **__A : Tuple ): snake_case__ : Tuple = cls.get_config_dict(__A , **__A ) if config_dict.get("model_type" ) == "bridgetower": snake_case__ : List[Any] = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "bridgetower_text_model" def __init__( self : Optional[int] , __A : str=5_0_2_6_5 , __A : List[Any]=7_6_8 , __A : int=1_2 , __A : Optional[Any]=1_2 , __A : str=1 , __A : Dict=3_0_7_2 , __A : Tuple="gelu" , __A : Optional[Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_4 , __A : List[str]=1 , __A : List[Any]=1e-0_5 , __A : int=1 , __A : str=0 , __A : str=2 , __A : Union[str, Any]="absolute" , __A : Optional[Any]=True , **__A : Optional[Any] , ): super().__init__(**__A ) snake_case__ : List[str] = vocab_size snake_case__ : Optional[int] = hidden_size snake_case__ : Union[str, Any] = num_hidden_layers snake_case__ : Dict = num_attention_heads snake_case__ : Tuple = hidden_act snake_case__ : Any = initializer_factor snake_case__ : List[Any] = intermediate_size snake_case__ : Tuple = hidden_dropout_prob snake_case__ : Optional[int] = attention_probs_dropout_prob snake_case__ : Any = max_position_embeddings snake_case__ : Union[str, Any] = type_vocab_size snake_case__ : Optional[int] = layer_norm_eps snake_case__ : Optional[Any] = position_embedding_type snake_case__ : Union[str, Any] = use_cache snake_case__ : List[str] = pad_token_id snake_case__ : Tuple = bos_token_id snake_case__ : Dict = eos_token_id @classmethod def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Union[str, Any] ): snake_case__ : Any = cls.get_config_dict(__A , **__A ) if config_dict.get("model_type" ) == "bridgetower": snake_case__ : Optional[int] = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__A , **__A ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "bridgetower" def __init__( self : Optional[int] , __A : Dict=True , __A : Union[str, Any]="gelu" , __A : Tuple=7_6_8 , __A : Dict=1 , __A : Optional[Any]=1e-0_5 , __A : Optional[int]=False , __A : int="add" , __A : List[Any]=1_2 , __A : Any=6 , __A : List[str]=False , __A : int=False , __A : List[Any]=None , __A : Union[str, Any]=None , **__A : str , ): # TODO: remove this once the Hub files are updated. snake_case__ : Optional[int] = kwargs.pop("text_config_dict" , __A ) snake_case__ : Tuple = kwargs.pop("vision_config_dict" , __A ) super().__init__(**__A ) snake_case__ : Dict = share_cross_modal_transformer_layers snake_case__ : Optional[int] = hidden_act snake_case__ : Optional[Any] = hidden_size snake_case__ : Any = initializer_factor snake_case__ : Any = layer_norm_eps snake_case__ : Optional[Any] = share_link_tower_layers snake_case__ : Any = link_tower_type snake_case__ : Optional[Any] = num_attention_heads snake_case__ : Dict = num_hidden_layers snake_case__ : Tuple = tie_word_embeddings snake_case__ : Any = init_layernorm_from_vision_encoder if text_config is None: snake_case__ : Optional[Any] = {} logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." ) if vision_config is None: snake_case__ : int = {} logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." 
) snake_case__ : Optional[int] = BridgeTowerTextConfig(**__A ) snake_case__ : Optional[Any] = BridgeTowerVisionConfig(**__A ) @classmethod def _lowercase ( cls : Any , __A : BridgeTowerTextConfig , __A : BridgeTowerVisionConfig , **__A : List[str] ): return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__A ) def _lowercase ( self : Tuple ): snake_case__ : Tuple = copy.deepcopy(self.__dict__ ) snake_case__ : str = self.text_config.to_dict() snake_case__ : Dict = self.vision_config.to_dict() snake_case__ : str = self.__class__.model_type return output
717
def SCREAMING_SNAKE_CASE ( snake_case_ : list ): if len(snake_case_ ) <= 1: return lst snake_case__ : List[Any] = 1 while i < len(snake_case_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case__, snake_case__ : Tuple = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case__ : Union[str, Any] = 1 return lst if __name__ == "__main__": __lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip() __lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
25
0
from ...processing_utils import ProcessorMixin class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = "SpeechT5FeatureExtractor" a_ = "SpeechT5Tokenizer" def __init__( self : Optional[int] , __A : Any , __A : Any ): super().__init__(__A , __A ) def __call__( self : Any , *__A : Tuple , **__A : List[Any] ): snake_case__ : int = kwargs.pop("audio" , __A ) snake_case__ : Optional[int] = kwargs.pop("text" , __A ) snake_case__ : str = kwargs.pop("text_target" , __A ) snake_case__ : List[Any] = kwargs.pop("audio_target" , __A ) snake_case__ : Any = kwargs.pop("sampling_rate" , __A ) if audio is not None and text is not None: raise ValueError( "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?" ) if audio_target is not None and text_target is not None: raise ValueError( "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?" ) if audio is None and audio_target is None and text is None and text_target is None: raise ValueError( "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process." 
) if audio is not None: snake_case__ : Union[str, Any] = self.feature_extractor(__A , *__A , sampling_rate=__A , **__A ) elif text is not None: snake_case__ : Dict = self.tokenizer(__A , **__A ) else: snake_case__ : Optional[int] = None if audio_target is not None: snake_case__ : List[str] = self.feature_extractor(audio_target=__A , *__A , sampling_rate=__A , **__A ) snake_case__ : str = targets["input_values"] elif text_target is not None: snake_case__ : List[str] = self.tokenizer(__A , **__A ) snake_case__ : List[Any] = targets["input_ids"] else: snake_case__ : int = None if inputs is None: return targets if targets is not None: snake_case__ : Optional[int] = labels snake_case__ : Any = targets.get("attention_mask" ) if decoder_attention_mask is not None: snake_case__ : Any = decoder_attention_mask return inputs def _lowercase ( self : Optional[int] , *__A : Any , **__A : List[Any] ): snake_case__ : Optional[Any] = kwargs.pop("input_values" , __A ) snake_case__ : List[Any] = kwargs.pop("input_ids" , __A ) snake_case__ : Optional[Any] = kwargs.pop("labels" , __A ) if input_values is not None and input_ids is not None: raise ValueError("Cannot process both `input_values` and `input_ids` inputs." ) if input_values is None and input_ids is None and labels is None: raise ValueError( "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded." 
) if input_values is not None: snake_case__ : Tuple = self.feature_extractor.pad(__A , *__A , **__A ) elif input_ids is not None: snake_case__ : List[Any] = self.tokenizer.pad(__A , **__A ) else: snake_case__ : Dict = None if labels is not None: if "input_ids" in labels or (isinstance(__A , __A ) and "input_ids" in labels[0]): snake_case__ : Any = self.tokenizer.pad(__A , **__A ) snake_case__ : Optional[Any] = targets["input_ids"] else: snake_case__ : List[str] = self.feature_extractor.feature_size snake_case__ : Optional[int] = self.feature_extractor.num_mel_bins snake_case__ : Dict = self.feature_extractor.pad(__A , *__A , **__A ) snake_case__ : Any = feature_size_hack snake_case__ : int = targets["input_values"] else: snake_case__ : Any = None if inputs is None: return targets if targets is not None: snake_case__ : int = labels snake_case__ : str = targets.get("attention_mask" ) if decoder_attention_mask is not None: snake_case__ : Optional[int] = decoder_attention_mask return inputs def _lowercase ( self : int , *__A : Optional[Any] , **__A : Tuple ): return self.tokenizer.batch_decode(*__A , **__A ) def _lowercase ( self : str , *__A : Dict , **__A : List[Any] ): return self.tokenizer.decode(*__A , **__A )
718
from __future__ import annotations import time __lowerCamelCase : str = list[tuple[int, int]] __lowerCamelCase : Optional[int] = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] __lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Union[str, Any] , __A : int , __A : int , __A : int , __A : int , __A : Node | None ): snake_case__ : Optional[int] = pos_x snake_case__ : Dict = pos_y snake_case__ : int = (pos_y, pos_x) snake_case__ : Optional[int] = goal_x snake_case__ : Tuple = goal_y snake_case__ : str = parent class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : List[Any] , __A : tuple[int, int] , __A : tuple[int, int] ): snake_case__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , __A ) snake_case__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __A ) snake_case__ : int = [self.start] snake_case__ : Union[str, Any] = False def _lowercase ( self : Dict ): while self.node_queue: snake_case__ : Optional[Any] = self.node_queue.pop(0 ) if current_node.pos == self.target.pos: snake_case__ : Optional[Any] = True return self.retrace_path(__A ) snake_case__ : int = self.get_successors(__A ) for node in successors: self.node_queue.append(__A ) if not self.reached: return [self.start.pos] return None def _lowercase ( self : Union[str, Any] , __A : Node ): snake_case__ : str = [] for action in delta: snake_case__ : str = parent.pos_x + action[1] snake_case__ : Union[str, Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node(__A , __A , self.target.pos_y , self.target.pos_x , __A ) ) return successors def _lowercase ( self : Optional[Any] , __A : Node | 
None ): snake_case__ : Tuple = node snake_case__ : Any = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) snake_case__ : Tuple = current_node.parent path.reverse() return path class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Dict , __A : str , __A : int ): snake_case__ : str = BreadthFirstSearch(__A , __A ) snake_case__ : int = BreadthFirstSearch(__A , __A ) snake_case__ : Tuple = False def _lowercase ( self : Optional[Any] ): while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue: snake_case__ : Any = self.fwd_bfs.node_queue.pop(0 ) snake_case__ : List[str] = self.bwd_bfs.node_queue.pop(0 ) if current_bwd_node.pos == current_fwd_node.pos: snake_case__ : List[str] = True return self.retrace_bidirectional_path( __A , __A ) snake_case__ : Union[str, Any] = current_bwd_node snake_case__ : Dict = current_fwd_node snake_case__ : List[Any] = { self.fwd_bfs: self.fwd_bfs.get_successors(__A ), self.bwd_bfs: self.bwd_bfs.get_successors(__A ), } for bfs in [self.fwd_bfs, self.bwd_bfs]: for node in successors[bfs]: bfs.node_queue.append(__A ) if not self.reached: return [self.fwd_bfs.start.pos] return None def _lowercase ( self : Any , __A : Node , __A : Node ): snake_case__ : List[str] = self.fwd_bfs.retrace_path(__A ) snake_case__ : Optional[Any] = self.bwd_bfs.retrace_path(__A ) bwd_path.pop() bwd_path.reverse() snake_case__ : List[Any] = fwd_path + bwd_path return path if __name__ == "__main__": # all coordinates are given in format [y,x] import doctest doctest.testmod() __lowerCamelCase : str = (0, 0) __lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) __lowerCamelCase : Any = time.time() __lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal) __lowerCamelCase : str = bfs.search() __lowerCamelCase : Optional[Any] = time.time() - start_bfs_time print("""Unidirectional BFS computation time : """, bfs_time) __lowerCamelCase : Optional[Any] = time.time() 
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal) __lowerCamelCase : str = bd_bfs.search() __lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time print("""Bidirectional BFS computation time : """, bd_bfs_time)
25
0
from __future__ import annotations from cmath import sqrt def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int , snake_case_ : int ): if a == 0: raise ValueError("Coefficient 'a' must not be zero." ) snake_case__ : Optional[Any] = b * b - 4 * a * c snake_case__ : str = (-b + sqrt(snake_case_ )) / (2 * a) snake_case__ : List[str] = (-b - sqrt(snake_case_ )) / (2 * a) return ( root_a.real if not root_a.imag else root_a, root_a.real if not root_a.imag else root_a, ) def SCREAMING_SNAKE_CASE ( ): snake_case__ : int = quadratic_roots(a=5 , b=6 , c=1 ) print(F'''The solutions are: {solutiona} and {solutiona}''' ) if __name__ == "__main__": main()
719
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Dict = parent snake_case__ : Optional[int] = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : str = min_resolution snake_case__ : Tuple = max_resolution snake_case__ : List[Any] = do_resize snake_case__ : Dict = size snake_case__ : List[str] = do_normalize snake_case__ : Optional[int] = image_mean snake_case__ : Optional[int] = image_std snake_case__ : Any = do_rescale snake_case__ : Optional[int] = rescale_factor snake_case__ : int = do_pad def _lowercase ( self : Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ): if not batched: snake_case__ : List[str] = image_inputs[0] if isinstance(__A , 
Image.Image ): snake_case__, snake_case__ : Tuple = image.size else: snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2] if w < h: snake_case__ : Dict = int(self.size["shortest_edge"] * h / w ) snake_case__ : Optional[int] = self.size["shortest_edge"] elif w > h: snake_case__ : List[Any] = self.size["shortest_edge"] snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Dict = self.size["shortest_edge"] snake_case__ : Dict = self.size["shortest_edge"] else: snake_case__ : str = [] for image in image_inputs: snake_case__, snake_case__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0] snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = ConditionalDetrImageProcessor if is_vision_available() else None def _lowercase ( self : int ): snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self ) @property def _lowercase ( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Any ): snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : List[str] ): snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Any = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , 
pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : Union[str, Any] ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : 
Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : List[Any] ): # prepare image and target snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Union[str, Any] = json.loads(f.read() ) snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) snake_case__ : int = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : str ): # prepare image, target and masks_path snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : int = json.loads(f.read() ) snake_case__ : Optional[int] = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" ) snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : str = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    """Return the scalar sum of every parameter tensor in ``state_dict``.

    Keys containing ``encoder.embeddings`` are skipped: the original FLAVA
    checkpoint stores those weights twice, so including them would inflate
    the total used by the conversion sanity check below.
    """
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename original FLAVA checkpoint keys to the HF ``FlavaForPreTraining`` layout.

    Args:
        state_dict: state dict of the original FLAVA model.
        codebook_state_dict: state dict of the (already converted) DALL-E
            visual codebook.

    Returns:
        A new dict whose keys follow the Hugging Face naming scheme; model
        weights are cast to float32, codebook weights are copied verbatim
        under the ``image_codebook.`` namespace.
    """
    upgrade = {}

    for key, value in state_dict.items():
        # embeddings are duplicated in the original checkpoint; drop them here
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        # The renames must be applied cumulatively to the same key. (A
        # previous revision bound each `.replace` result to a fresh temporary,
        # which kept only the last substitution and never filled `upgrade`.)
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    # Codebook weights live under the `image_codebook.` prefix in the HF model.
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Convert an original FLAVA checkpoint plus DALL-E codebook to HF format.

    Args:
        checkpoint_path: local path or URL of the original FLAVA checkpoint.
        codebook_path: path of the DALL-E codebook checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
        config_path: optional path to a HF ``config.json``; a default
            ``FlavaConfig`` is used when omitted.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    # Convert the codebook in memory only (no intermediate checkpoint on disk).
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)

    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    # Sanity check: the conversion must neither drop nor duplicate weights.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
720
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __lowerCamelCase : Optional[int] = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ __lowerCamelCase : str = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ __lowerCamelCase : str = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ): snake_case__ : List[Any] = compute_mauve( p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , ) return out
25
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = StableDiffusionInstructPixaPixPipeline a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def _lowercase ( self : str ): torch.manual_seed(0 ) snake_case__ : Union[str, Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) snake_case__ : Any = PNDMScheduler(skip_prk_steps=__A ) torch.manual_seed(0 ) snake_case__ : str = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) snake_case__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , 
hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) snake_case__ : int = CLIPTextModel(__A ) snake_case__ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) snake_case__ : Any = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _lowercase ( self : Union[str, Any] , __A : Dict , __A : Optional[Any]=0 ): snake_case__ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A ) snake_case__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ : int = Image.fromarray(np.uinta(__A ) ).convert("RGB" ) if str(__A ).startswith("mps" ): snake_case__ : List[Any] = torch.manual_seed(__A ) else: snake_case__ : List[Any] = torch.Generator(device=__A ).manual_seed(__A ) snake_case__ : Optional[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def _lowercase ( self : Dict ): snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case__ : int = self.get_dummy_components() snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A ) snake_case__ : int = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) snake_case__ : Tuple = self.get_dummy_inputs(__A ) snake_case__ : List[str] = sd_pipe(**__A ).images snake_case__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) snake_case__ : int = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : Tuple ): snake_case__ : List[Any] = "cpu" # ensure 
determinism for the device-dependent torch.Generator snake_case__ : Optional[Any] = self.get_dummy_components() snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline(**__A ) snake_case__ : Optional[int] = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) snake_case__ : int = self.get_dummy_inputs(__A ) snake_case__ : Optional[int] = "french fries" snake_case__ : List[Any] = sd_pipe(**__A , negative_prompt=__A ) snake_case__ : str = output.images snake_case__ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : int ): snake_case__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case__ : Optional[Any] = self.get_dummy_components() snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline(**__A ) snake_case__ : int = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) snake_case__ : str = self.get_dummy_inputs(__A ) snake_case__ : Optional[Any] = [inputs["prompt"]] * 2 snake_case__ : List[Any] = np.array(inputs["image"] ).astype(np.floataa ) / 2_5_5.0 snake_case__ : Tuple = torch.from_numpy(__A ).unsqueeze(0 ).to(__A ) snake_case__ : List[str] = image / 2 + 0.5 snake_case__ : List[Any] = image.permute(0 , 3 , 1 , 2 ) snake_case__ : str = image.repeat(2 , 1 , 1 , 1 ) snake_case__ : str = sd_pipe(**__A ).images snake_case__ : List[Any] = image[-1, -3:, -3:, -1] assert image.shape == (2, 3_2, 3_2, 3) snake_case__ : Union[str, Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : List[str] ): snake_case__ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case__ : Any = 
self.get_dummy_components() snake_case__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" ) snake_case__ : Optional[int] = StableDiffusionInstructPixaPixPipeline(**__A ) snake_case__ : Tuple = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) snake_case__ : int = self.get_dummy_inputs(__A ) snake_case__ : Dict = sd_pipe(**__A ).images snake_case__ : int = image[0, -3:, -3:, -1] snake_case__ : Tuple = [round(__A , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(__A ) for x in slice] ) ) assert image.shape == (1, 3_2, 3_2, 3) snake_case__ : Union[str, Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : Any ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def _lowercase ( self : int ): snake_case__ : Union[str, Any] = self.get_dummy_components() snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__A ) snake_case__ : List[Any] = VaeImageProcessor(do_resize=__A , do_normalize=__A ) snake_case__ : Optional[int] = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) snake_case__ : Tuple = pipe(**self.get_dummy_inputs_by_type(__A , input_image_type="pt" ) )[0] snake_case__ : List[str] = components["vae"] snake_case__ : List[str] = self.get_dummy_inputs_by_type(__A , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): snake_case__ : int = vae.encode(inputs[image_param] ).latent_dist.mode() snake_case__ : List[str] = pipe(**__A )[0] snake_case__ : Optional[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(__A , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( 
self : Optional[Any] ): super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : Optional[Any] , __A : Union[str, Any]=0 ): snake_case__ : List[str] = torch.manual_seed(__A ) snake_case__ : int = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) snake_case__ : Any = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def _lowercase ( self : Dict ): snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() snake_case__ : List[str] = self.get_inputs() snake_case__ : List[Any] = pipe(**__A ).images snake_case__ : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) snake_case__ : Union[str, Any] = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase ( self : Any ): snake_case__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__A ) snake_case__ : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() snake_case__ : Optional[Any] = self.get_inputs() snake_case__ : str = pipe(**__A ).images snake_case__ : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) snake_case__ : Union[str, Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase ( self : Dict ): snake_case__ : Dict = 
StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__A ) snake_case__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() snake_case__ : List[str] = self.get_inputs() snake_case__ : List[str] = pipe(**__A ).images snake_case__ : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) snake_case__ : Optional[int] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def _lowercase ( self : Optional[int] ): snake_case__ : Optional[Any] = 0 def callback_fn(__A : int , __A : int , __A : torch.FloatTensor ) -> None: snake_case__ : Any = True nonlocal number_of_steps number_of_steps += 1 if step == 1: snake_case__ : str = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) snake_case__ : List[str] = latents[0, -3:, -3:, -1] snake_case__ : int = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: snake_case__ : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) snake_case__ : List[str] = latents[0, -3:, -3:, -1] snake_case__ : List[str] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 snake_case__ : Union[str, Any] = False snake_case__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa ) snake_case__ : List[Any] = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() snake_case__ : Optional[int] = self.get_inputs() pipe(**__A 
, callback=__A , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _lowercase ( self : Tuple ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa ) snake_case__ : Any = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case__ : Optional[Any] = self.get_inputs() snake_case__ : Dict = pipe(**__A ) snake_case__ : Dict = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 1_0**9 def _lowercase ( self : int ): snake_case__ : int = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 snake_case__ : Optional[Any] = inputs["image"].resize((5_0_4, 5_0_4) ) snake_case__ : List[str] = "timbrooks/instruct-pix2pix" snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( __A , safety_checker=__A , ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() snake_case__ : Union[str, Any] = pipe(**__A ) snake_case__ : Union[str, Any] = output.images[0] snake_case__ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 5_0_4, 3) snake_case__ : Optional[int] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
721
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __lowerCamelCase : Union[str, Any] = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip 
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __lowerCamelCase : List[Any] = concatenate_datasets __lowerCamelCase : List[str] = DownloadConfig __lowerCamelCase : Union[str, Any] = DownloadManager __lowerCamelCase : str = DownloadMode __lowerCamelCase : Union[str, Any] = DownloadConfig __lowerCamelCase : List[str] = DownloadMode __lowerCamelCase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
25
0
from __future__ import annotations

import requests

# Post fields Reddit exposes on a listing child; anything outside this set is
# rejected before we touch the network.
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color author_flair_css_class
author_flair_richtext author_flair_template_id author_fullname author_premium can_mod_post
category clicked content_categories created_utc downs edited gilded gildings hidden
hide_score is_created_from_ads_ui is_meta is_original_content is_reddit_media_domain
is_video link_flair_css_class link_flair_richtext link_flair_text link_flair_text_color
media_embed mod_reason_title name permalink pwls quarantine saved score secure_media
secure_media_embed selftext subreddit subreddit_name_prefixed subreddit_type thumbnail
title top_awarded_type total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via Reddit's public JSON listing.

    Args:
        subreddit: subreddit name, e.g. ``"learnpython"``.
        limit: number of posts to collect (0-based indices up to ``limit``).
        age: which listing to query (``"new"``, ``"hot"``, ...).
        wanted_data: fields to keep from each post; every entry must appear
            in ``valid_terms``. When empty/None, the raw child objects are
            returned instead.

    Returns:
        Dict mapping post index to either the raw listing child or a dict of
        the requested fields.

    Raises:
        ValueError: if ``wanted_data`` names an unknown field.
        requests.HTTPError: if Reddit responds with status 429 (rate limited).
    """
    requested = wanted_data or []

    unknown = sorted(set(requested) - valid_terms)
    if unknown:
        invalid_search_terms = ", ".join(unknown)
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)

    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    payload = response.json()
    children = payload["data"]["children"]

    if not requested:
        # No field filter: hand back the raw child objects.
        return {index: children[index] for index in range(limit)}

    collected = {}
    for index in range(limit):
        post = children[index]["data"]
        collected[index] = {field: post[field] for field in requested}
    return collected


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
700
from itertools import accumulate


def prime_sieve(limit: int) -> list[int]:
    """Return all prime numbers strictly below ``limit``.

    Odd-only sieve of Eratosthenes: even candidates other than 2 are never
    examined, marking starts at ``candidate * candidate`` (smaller multiples
    were already marked by smaller primes), and the stride ``2 * candidate``
    skips even multiples entirely.
    """
    if limit < 3:
        # No odd primes to sieve; also avoids indexing past a too-short list
        # (the previous version raised IndexError for limit < 3).
        return []
    is_prime = [True] * limit
    for candidate in range(3, int(limit**0.5) + 1, 2):
        if not is_prime[candidate]:
            continue  # composite: its multiples were marked by a smaller prime
        for multiple in range(candidate * candidate, limit, 2 * candidate):
            is_prime[multiple] = False
    return [2] + [n for n in range(3, limit, 2) if is_prime[n]]


def solution(ceiling: int = 1000000) -> int:
    """Project Euler 50: the prime below ``ceiling`` that can be written as
    the sum of the most consecutive primes.

    Args:
        ceiling: exclusive upper bound for both the sieve and the sums.

    Returns:
        The prime below ``ceiling`` reachable as the longest run of
        consecutive primes (0 if no such prime exists).
    """
    primes = prime_sieve(ceiling)
    # O(1) membership tests; the previous `sol in primes` scanned the list.
    prime_set = set(primes)
    # prefix[j] is the sum of the first j primes, so sum(primes[i:j]) is
    # prefix[j] - prefix[i] — no repeated slice re-summation.
    prefix = [0, *accumulate(primes)]

    best_length = 0
    largest = 0
    for i in range(len(primes)):
        # Start at i + best_length: a shorter run can never improve the answer.
        for j in range(i + best_length, len(primes) + 1):
            candidate = prefix[j] - prefix[i]
            if candidate >= ceiling:
                break
            if candidate in prime_set:
                best_length = j - i
                largest = candidate
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
25
0
# Lint as: python3 import os import re import urllib.parse from pathlib import Path from typing import Callable, List, Optional, Union from zipfile import ZipFile from ..utils.file_utils import cached_path, hf_github_url from ..utils.logging import get_logger from ..utils.version import Version __lowerCamelCase : int = get_logger(__name__) class SCREAMING_SNAKE_CASE__ : """simple docstring""" a_ = "dummy_data" a_ = "datasets" a_ = False def __init__( self : Optional[int] , __A : str , __A : str , __A : Union[Version, str] , __A : Optional[str] = None , __A : bool = False , __A : bool = True , __A : Optional[List[Callable]] = None , ): snake_case__ : Tuple = 0 snake_case__ : Union[str, Any] = dataset_name snake_case__ : List[str] = cache_dir snake_case__ : str = use_local_dummy_data snake_case__ : Any = config # download_callbacks take a single url as input snake_case__ : List[Callable] = download_callbacks or [] # if False, it doesn't load existing files and it returns the paths of the dummy files relative # to the dummy_data zip file root snake_case__ : Union[str, Any] = load_existing_dummy_data # TODO(PVP, QL) might need to make this more general snake_case__ : Union[str, Any] = str(__A ) # to be downloaded snake_case__ : List[str] = None snake_case__ : Union[str, Any] = None @property def _lowercase ( self : str ): if self._dummy_file is None: snake_case__ : Dict = self.download_dummy_data() return self._dummy_file @property def _lowercase ( self : Any ): if self.config is not None: # structure is dummy / config_name / version_name return os.path.join("dummy" , self.config.name , self.version_name ) # structure is dummy / version_name return os.path.join("dummy" , self.version_name ) @property def _lowercase ( self : Optional[Any] ): return os.path.join(self.dummy_data_folder , "dummy_data.zip" ) def _lowercase ( self : List[Any] ): snake_case__ : List[Any] = ( self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data 
) snake_case__ : Dict = cached_path( __A , cache_dir=self.cache_dir , extract_compressed_file=__A , force_extract=__A ) return os.path.join(__A , self.dummy_file_name ) @property def _lowercase ( self : str ): return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file ) @property def _lowercase ( self : List[str] ): if self._bucket_url is None: snake_case__ : List[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) ) return self._bucket_url @property def _lowercase ( self : List[str] ): # return full path if its a dir if os.path.isdir(self.dummy_file ): return self.dummy_file # else cut off path to file -> example `xsum`. return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] ) def _lowercase ( self : Union[str, Any] , __A : Optional[Any] , *__A : Optional[int] ): if self.load_existing_dummy_data: # dummy data is downloaded and tested snake_case__ : List[str] = self.dummy_file else: # dummy data cannot be downloaded and only the path to dummy file is returned snake_case__ : Dict = self.dummy_file_name # special case when data_url is a dict if isinstance(__A , __A ): return self.create_dummy_data_dict(__A , __A ) elif isinstance(__A , (list, tuple) ): return self.create_dummy_data_list(__A , __A ) else: return self.create_dummy_data_single(__A , __A ) def _lowercase ( self : Union[str, Any] , __A : List[str] , *__A : List[Any] ): return self.download_and_extract(__A ) def _lowercase ( self : Any , __A : Union[str, Any] , __A : str ): return self.download_and_extract(__A ) def _lowercase ( self : Union[str, Any] , __A : int , *__A : List[str] , **__A : str ): return path def _lowercase ( self : Optional[Any] ): return {} def _lowercase ( self : Dict , __A : int , __A : Tuple ): snake_case__ : str = {} for key, single_urls in data_url.items(): for download_callback in self.download_callbacks: if isinstance(__A , __A ): for single_url in single_urls: download_callback(__A ) else: snake_case__ 
: Optional[Any] = single_urls download_callback(__A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus if isinstance(__A , __A ): snake_case__ : List[str] = [os.path.join(__A , urllib.parse.quote_plus(Path(__A ).name ) ) for x in single_urls] else: snake_case__ : List[str] = single_urls snake_case__ : Dict = os.path.join(__A , urllib.parse.quote_plus(Path(__A ).name ) ) snake_case__ : List[str] = value # make sure that values are unique if all(isinstance(__A , __A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len( dummy_data_dict.values() ): # append key to value to make its name unique snake_case__ : str = {key: value + key for key, value in dummy_data_dict.items()} return dummy_data_dict def _lowercase ( self : List[Any] , __A : Optional[Any] , __A : Union[str, Any] ): snake_case__ : Tuple = [] # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one snake_case__ : List[str] = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __A ) ) for url in data_url ) snake_case__ : Optional[int] = all( url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url ) if data_url and (is_tf_records or is_pubmed_records): snake_case__ : Optional[Any] = [data_url[0]] * len(__A ) for single_url in data_url: for download_callback in self.download_callbacks: download_callback(__A ) # we force the name of each key to be the last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus snake_case__ : List[str] = os.path.join(__A , urllib.parse.quote_plus(single_url.split("/" )[-1] ) ) dummy_data_list.append(__A ) return dummy_data_list def _lowercase ( self : Optional[int] , __A : List[Any] , __A : Any ): for download_callback in self.download_callbacks: download_callback(__A ) # we force the name of each key to be the 
last file / folder name of the url path # if the url has arguments, we need to encode them with urllib.parse.quote_plus snake_case__ : int = os.path.join(__A , urllib.parse.quote_plus(data_url.split("/" )[-1] ) ) if os.path.exists(__A ) or not self.load_existing_dummy_data: return value else: # Backward compatibility, maybe deprecate at one point. # For many datasets with single url calls to dl_manager.download_and_extract, # the dummy_data.zip file is actually the zipped downloaded file # while now we expected the dummy_data.zip file to be a directory containing # the downloaded file. return path_to_dummy_data def _lowercase ( self : List[str] ): pass def _lowercase ( self : Dict ): pass def _lowercase ( self : Any , __A : Any ): def _iter_archive_members(__A : Tuple ): # this preserves the order of the members inside the ZIP archive snake_case__ : List[str] = Path(self.dummy_file ).parent snake_case__ : Optional[Any] = path.relative_to(__A ) with ZipFile(self.local_path_to_dummy_data ) as zip_file: snake_case__ : List[Any] = zip_file.namelist() for member in members: if member.startswith(relative_path.as_posix() ): yield dummy_parent_path.joinpath(__A ) snake_case__ : int = Path(__A ) snake_case__ : Any = _iter_archive_members(__A ) if self.use_local_dummy_data else path.rglob("*" ) for file_path in file_paths: if file_path.is_file() and not file_path.name.startswith((".", "__") ): yield file_path.relative_to(__A ).as_posix(), file_path.open("rb" ) def _lowercase ( self : int , __A : Any ): if not isinstance(__A , __A ): snake_case__ : Union[str, Any] = [paths] for path in paths: if os.path.isfile(__A ): if os.path.basename(__A ).startswith((".", "__") ): return yield path else: for dirpath, dirnames, filenames in os.walk(__A ): if os.path.basename(__A ).startswith((".", "__") ): continue dirnames.sort() for filename in sorted(__A ): if filename.startswith((".", "__") ): continue yield os.path.join(__A , __A )
701
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Optional[Any] = parent snake_case__ : str = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : Optional[Any] = min_resolution snake_case__ : List[str] = max_resolution snake_case__ : Tuple = do_resize snake_case__ : str = size snake_case__ : str = do_normalize snake_case__ : Optional[Any] = image_mean snake_case__ : List[str] = image_std snake_case__ : List[str] = do_rescale snake_case__ : Tuple = rescale_factor snake_case__ : Tuple = do_pad def _lowercase ( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ): if not batched: snake_case__ : List[Any] = image_inputs[0] if isinstance(__A , Image.Image ): 
snake_case__, snake_case__ : str = image.size else: snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2] if w < h: snake_case__ : Any = int(self.size["shortest_edge"] * h / w ) snake_case__ : Any = self.size["shortest_edge"] elif w > h: snake_case__ : Optional[int] = self.size["shortest_edge"] snake_case__ : Any = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Tuple = self.size["shortest_edge"] snake_case__ : int = self.size["shortest_edge"] else: snake_case__ : Any = [] for image in image_inputs: snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0] snake_case__ : int = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = DeformableDetrImageProcessor if is_vision_available() else None def _lowercase ( self : str ): snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self ) @property def _lowercase ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Tuple ): snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "do_rescale" ) ) self.assertTrue(hasattr(__A , "do_pad" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : Any ): snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Tuple = 
self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : str ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : int ): # Initialize image_processing snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, 
expected_height, expected_width) , ) # Test batched snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Union[str, Any] ): # Initialize image_processing snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : Optional[Any] ): # prepare image and target snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Tuple = json.loads(f.read() ) snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : str = DeformableDetrImageProcessor() snake_case__ : Tuple = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : Any = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : Optional[int] ): # prepare image, target and masks_path snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : Any = json.loads(f.read() ) snake_case__ : Dict = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" ) snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Any = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : List[str] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : Union[str, Any] = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : Union[str, Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Return True if some permutation of ``input_str`` is a palindrome.

    Spaces are ignored and the comparison is case-insensitive.  A string can
    be rearranged into a palindrome iff at most one character occurs an odd
    number of times.
    """
    # Fix: the original body read `input_str` but the parameter was named
    # `snake_case_` (obfuscation artifact) -> NameError on every call.
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Same check as the Counter-based version, using a hand-built frequency dict.

    Kept as a second implementation so the two can be timed against each
    other (see ``benchmark``).  Returns True for the empty string.
    """
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1
    # A palindrome permutation allows at most one character with an odd count.
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
        if odd_char > 1:
            return False
    return True


def benchmark(input_str: str = "") -> None:
    """Print the answer and timing of both implementations for ``input_str``.

    The ``timeit`` setup strings deliberately reach back into ``__main__``
    (aliased as ``z``) so the timed statement sees the module-level
    ``check_str`` read in the ``__main__`` block below.
    """
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    # Fix: the original assigned the input to `__lowerCamelCase` but then read
    # the undefined names `check_str` and `status` (obfuscation artifact).
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
702
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __lowerCamelCase : Tuple = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __lowerCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) snake_case__ : Optional[int] = bs[:] snake_case__ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case_ ) cs.append(2**8 + n ) n += 1 snake_case__ : Dict = [chr(snake_case_ ) for n in cs] return dict(zip(snake_case_ , snake_case_ ) ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): snake_case__ : Dict = set() snake_case__ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case__ : List[Any] = char return pairs class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ): snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding="utf-8" ) as vocab_handle: snake_case__ : Any = json.load(__A ) snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()} snake_case__ : Union[str, Any] = errors # how to handle errors in decoding snake_case__ : Any = bytes_to_unicode() snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding="utf-8" ) as merges_handle: snake_case__ : str = merges_handle.read().split("\n" )[1:-1] snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges] snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) ) snake_case__ : Optional[int] = {} snake_case__ : Any = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _lowercase ( self : List[Any] ): return len(self.encoder ) def _lowercase ( self : Any ): return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self : Optional[Any] , __A : Optional[int] ): if token in self.cache: return self.cache[token] snake_case__ : Union[str, Any] = tuple(__A ) snake_case__ : List[Any] = get_pairs(__A ) if not pairs: return token while True: snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) ) if bigram not in self.bpe_ranks: break snake_case__, snake_case__ : Dict = bigram snake_case__ : str = [] snake_case__ : Union[str, Any] = 0 while i < len(__A ): try: snake_case__ : Dict = word.index(__A , __A ) except 
ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case__ : str = j if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case__ : str = tuple(__A ) snake_case__ : int = new_word if len(__A ) == 1: break else: snake_case__ : List[str] = get_pairs(__A ) snake_case__ : List[Any] = " ".join(__A ) snake_case__ : Optional[int] = word return word def _lowercase ( self : Optional[Any] , __A : Optional[Any] ): snake_case__ : List[str] = [] for token in re.findall(self.pat , __A ): snake_case__ : Dict = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) ) return bpe_tokens def _lowercase ( self : Union[str, Any] , __A : Optional[int] ): return self.encoder.get(__A , self.encoder.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , __A : Optional[Any] ): return self.decoder.get(__A ) def _lowercase ( self : Union[str, Any] , __A : Dict ): snake_case__ : Optional[Any] = "".join(__A ) snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ): if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : List[Any] = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case__ : str = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" ) snake_case__ : str = 0 with open(__A , "w" , encoding="utf-8" ) as 
writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) snake_case__ : int = token_index writer.write(" ".join(__A ) + "\n" ) index += 1 return vocab_file, merge_file def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : Tuple = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ): snake_case__ : Any = [self.sep_token_id] snake_case__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ): snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()): snake_case__ : Optional[int] = " " + text return (text, kwargs) def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ): 
snake_case__ : Optional[Any] = super()._pad( encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , ) # Load from model defaults if return_attention_mask is None: snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A ) if needs_to_be_padded: snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case__ : int = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": snake_case__ : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
25
0
import unittest from transformers import GPTNeoXJapaneseConfig, is_torch_available from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Optional[Any] , __A : List[str] , __A : str=1_3 , __A : Any=7 , __A : List[Any]=True , __A : List[str]=True , __A : Optional[Any]=True , __A : List[str]=True , __A : List[Any]=9_9 , __A : Optional[Any]=3_2 , __A : Optional[int]=5 , __A : Union[str, Any]=4 , __A : int=4 , __A : Optional[int]="gelu" , __A : int=0.0 , __A : int=0.1 , __A : Any=True , __A : Any=5_1_2 , __A : List[str]=1_6 , __A : List[str]=2 , __A : str=0.0_2 , __A : int=3 , __A : List[Any]=4 , __A : str=None , ): snake_case__ : Optional[Any] = parent snake_case__ : str = batch_size snake_case__ : Union[str, Any] = seq_length snake_case__ : List[str] = is_training snake_case__ : Optional[Any] = use_input_mask snake_case__ : Optional[int] = use_token_type_ids snake_case__ : Any = use_labels snake_case__ : Dict = vocab_size snake_case__ : Optional[Any] = hidden_size snake_case__ : Optional[Any] = num_hidden_layers snake_case__ : Union[str, Any] = num_attention_heads snake_case__ : Union[str, Any] = intermediate_multiple_size snake_case__ : int = hidden_act snake_case__ : List[str] = hidden_dropout snake_case__ : str = attention_dropout snake_case__ : Dict = weight_tying snake_case__ : str = max_position_embeddings snake_case__ : Tuple = type_vocab_size snake_case__ : Union[str, Any] = type_sequence_label_size snake_case__ : Dict = initializer_range snake_case__ : str = 
num_labels snake_case__ : Dict = num_choices snake_case__ : int = scope def _lowercase ( self : Optional[Any] ): snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Dict = None if self.use_input_mask: snake_case__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : List[str] = None if self.use_labels: snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase ( self : Optional[int] ): return GPTNeoXJapaneseConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__A , initializer_range=self.initializer_range , ) def _lowercase ( self : int ): snake_case__ : Optional[Any] = self.prepare_config_and_inputs() snake_case__ : Dict = True return config, input_ids, input_mask, token_labels def _lowercase ( self : Tuple , __A : int , __A : Dict , __A : List[Any] ): snake_case__ : Tuple = GPTNeoXJapaneseModel(config=__A ) model.to(__A ) model.eval() snake_case__ : Dict = model(__A , attention_mask=__A ) snake_case__ : Dict = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : List[Any] , __A : int , __A : List[str] , __A : Optional[Any] ): snake_case__ : Optional[int] = True snake_case__ : Optional[Any] = GPTNeoXJapaneseModel(__A ) model.to(__A ) model.eval() snake_case__ : Dict = model(__A , attention_mask=__A ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[Any] , __A : List[str] , __A : List[Any] , __A : Union[str, Any] , __A : int ): snake_case__ : Any = GPTNeoXJapaneseForCausalLM(config=__A ) model.to(__A ) model.eval() snake_case__ : Union[str, Any] = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : List[str] , __A : List[Any] , __A : str , __A : List[Any] ): snake_case__ : Union[str, Any] = True snake_case__ : Tuple = GPTNeoXJapaneseForCausalLM(config=__A ) model.to(__A ) model.eval() # first forward pass snake_case__ : List[str] = model(__A , attention_mask=__A , use_cache=__A ) snake_case__ : Optional[int] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids snake_case__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size ) snake_case__ : Optional[int] = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and snake_case__ : int = torch.cat([input_ids, next_tokens] , dim=-1 ) snake_case__ : List[Any] = torch.cat([input_mask, next_mask] , dim=-1 ) snake_case__ : Any = model(__A , attention_mask=__A , output_hidden_states=__A ) snake_case__ : Optional[int] = output_from_no_past["hidden_states"][0] snake_case__ : Dict = model( __A , attention_mask=__A , past_key_values=__A , output_hidden_states=__A , )["hidden_states"][0] # select random slice snake_case__ : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item() snake_case__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-3 ) ) def _lowercase ( self : 
Dict ): snake_case__ : Dict = self.prepare_config_and_inputs() snake_case__ : Any = config_and_inputs snake_case__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else () a_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else () a_ = ( {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM} if is_torch_available() else {} ) a_ = False a_ = False a_ = False a_ = False def _lowercase ( self : int ): snake_case__ : Dict = GPTNeoXJapaneseModelTester(self ) snake_case__ : Optional[int] = ConfigTester(self , config_class=__A , hidden_size=3_7 ) def _lowercase ( self : Optional[int] ): self.config_tester.run_common_tests() def _lowercase ( self : int ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(__A , __A , __A ) def _lowercase ( self : List[Any] ): snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(__A , __A , __A ) def _lowercase ( self : Tuple ): # This regression test was failing with PyTorch < 1.3 snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_decoder() snake_case__ : List[Any] = None self.model_tester.create_and_check_model_as_decoder(__A , __A , __A ) def _lowercase ( self : Any ): snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(__A , __A , __A ) def _lowercase ( self : Optional[Any] ): snake_case__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*__A ) @slow def _lowercase ( self : Union[str, Any] ): snake_case__ : Optional[Any] = 
"abeja/gpt-neox-japanese-2.7b" snake_case__ : Any = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"] snake_case__ : Optional[int] = [ "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。", "100年後に必要とされる会社は、「人」が中心の会社です。", "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。", "国境の長いトンネルを抜けると、そこは雪国だった。", "美味しい日本食といえば、やっぱりお寿司ですよね。", ] snake_case__ : Dict = GPTNeoXJapaneseTokenizer.from_pretrained(__A ) snake_case__ : Dict = GPTNeoXJapaneseForCausalLM.from_pretrained(__A ) snake_case__ : Dict = [] for prompt in prompts: snake_case__ : Tuple = tokenizer(__A , return_tensors="pt" ).input_ids snake_case__ : List[str] = model.generate(__A , max_length=5_0 ) snake_case__ : Optional[int] = tokenizer.batch_decode(__A , skip_special_tokens=__A ) predicted_outputs += generated_string self.assertListEqual(__A , __A )
703
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def SCREAMING_SNAKE_CASE ( snake_case_ : str ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any ): from diffusers.utils.testing_utils import pytest_terminal_summary_main snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
25
0
import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __lowerCamelCase : Optional[Any] = logging.get_logger(__name__) def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[Any]=False ): snake_case__ : Dict = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token") ) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") ) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") ) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") ) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") ) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") ) for stage_idx in range(len(config.backbone_config.depths ) ): for layer_idx in range(config.backbone_config.depths[stage_idx] ): rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', 
F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', 
F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') ) rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') ) # transformer encoder for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') ) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" snake_case__ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys] else: # layernorm 
+ classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any]=False ): for i in range(config.num_hidden_layers ): if base_model: snake_case__ : Tuple = "" else: snake_case__ : Optional[int] = "vit." # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case__ : List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) snake_case__ : List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case__ : Any = in_proj_weight[ : config.hidden_size, : ] snake_case__ : int = in_proj_bias[: config.hidden_size] snake_case__ : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case__ : Tuple = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case__ : int = in_proj_weight[ -config.hidden_size :, : ] snake_case__ : List[Any] = in_proj_bias[-config.hidden_size :] def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : Dict = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(snake_case_ , snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : int , snake_case_ : str ): snake_case__ : str = dct.pop(snake_case_ ) snake_case__ : int = val def SCREAMING_SNAKE_CASE ( ): snake_case__ : str = "http://images.cocodataset.org/val2017/000000039769.jpg" snake_case__ : str = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ) return im @torch.no_grad() def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : str=False ): snake_case__ : Union[str, Any] = BitConfig( global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] 
, embedding_dynamic_padding=snake_case_ , ) snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=snake_case_ , image_size=384 , num_labels=1000 ) snake_case__ : Optional[int] = False # load original model from timm snake_case__ : Dict = timm.create_model(snake_case_ , pretrained=snake_case_ ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case__ : str = timm_model.state_dict() if base_model: remove_classification_head_(snake_case_ ) snake_case__ : Any = create_rename_keys(snake_case_ , snake_case_ ) for src, dest in rename_keys: rename_key(snake_case_ , snake_case_ , snake_case_ ) read_in_q_k_v(snake_case_ , snake_case_ , snake_case_ ) snake_case__ : Dict = "huggingface/label-files" snake_case__ : List[str] = "imagenet-1k-id2label.json" snake_case__ : Union[str, Any] = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) ) snake_case__ : Any = {int(snake_case_ ): v for k, v in idalabel.items()} snake_case__ : Optional[Any] = idalabel snake_case__ : List[Any] = {v: k for k, v in idalabel.items()} # load HuggingFace model if vit_name[-5:] == "in21k": snake_case__ : str = ViTHybridModel(snake_case_ ).eval() else: snake_case__ : Any = ViTHybridForImageClassification(snake_case_ ).eval() model.load_state_dict(snake_case_ ) # create image processor snake_case__ : int = create_transform(**resolve_data_config({} , model=snake_case_ ) ) snake_case__ : List[str] = transform.transforms snake_case__ : Optional[int] = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } snake_case__ : str = ViTHybridImageProcessor( do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , 
image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) snake_case__ : Dict = prepare_img() snake_case__ : Optional[Any] = transform(snake_case_ ).unsqueeze(0 ) snake_case__ : Dict = processor(snake_case_ , return_tensors="pt" ).pixel_values # verify pixel values assert torch.allclose(snake_case_ , snake_case_ ) # verify logits with torch.no_grad(): snake_case__ : int = model(snake_case_ ) snake_case__ : Union[str, Any] = outputs.logits print("Predicted class:" , logits.argmax(-1 ).item() ) if base_model: snake_case__ : Optional[Any] = timm_model.forward_features(snake_case_ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(snake_case_ , outputs.pooler_output , atol=1E-3 ) else: snake_case__ : Optional[Any] = timm_model(snake_case_ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: Path(snake_case_ ).mkdir(exist_ok=snake_case_ ) print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case_ ) print(F'''Saving processor to {pytorch_dump_folder_path}''' ) processor.save_pretrained(snake_case_ ) if push_to_hub: print(F'''Pushing model and processor to the hub {vit_name}''' ) model.push_to_hub(F'''ybelkada/{vit_name}''' ) processor.push_to_hub(F'''ybelkada/{vit_name}''' ) if __name__ == "__main__": __lowerCamelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--vit_name""", default="""vit_base_r50_s16_384""", type=str, help="""Name of the hybrid ViT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub.""" ) __lowerCamelCase : Union[str, Any] = 
parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
704
def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : Any = [0] * len(snake_case_ ) for i in range(1 , len(snake_case_ ) ): # use last results for better performance - dynamic programming snake_case__ : Union[str, Any] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: snake_case__ : str = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 snake_case__ : int = j return prefix_result def SCREAMING_SNAKE_CASE ( snake_case_ : str ): return max(prefix_function(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod()
25
0
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = StableDiffusionInpaintPipeline a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS a_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess a_ = frozenset([] ) def _lowercase ( self : str ): torch.manual_seed(0 ) snake_case__ : Optional[Any] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=__A , ) snake_case__ : int = PNDMScheduler(skip_prk_steps=__A ) torch.manual_seed(0 ) snake_case__ : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) snake_case__ : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , 
layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , ) snake_case__ : Tuple = CLIPTextModel(__A ) snake_case__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) snake_case__ : Dict = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _lowercase ( self : Tuple , __A : Optional[int] , __A : str=0 ): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case__ : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A ) snake_case__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] snake_case__ : int = Image.fromarray(np.uinta(__A ) ).convert("RGB" ).resize((6_4, 6_4) ) snake_case__ : Optional[int] = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((6_4, 6_4) ) if str(__A ).startswith("mps" ): snake_case__ : str = torch.manual_seed(__A ) else: snake_case__ : Any = torch.Generator(device=__A ).manual_seed(__A ) snake_case__ : List[Any] = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _lowercase ( self : List[Any] ): snake_case__ : str = "cpu" # ensure determinism for the device-dependent torch.Generator snake_case__ : Tuple = self.get_dummy_components() snake_case__ : str = StableDiffusionInpaintPipeline(**__A ) snake_case__ : int = sd_pipe.to(__A ) sd_pipe.set_progress_bar_config(disable=__A ) snake_case__ : str = self.get_dummy_inputs(__A ) snake_case__ : int = sd_pipe(**__A ).images snake_case__ : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) snake_case__ : Dict = np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 
0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def _lowercase ( self : int ): super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Union[str, Any] ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowercase ( self : List[Any] ): snake_case__ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) snake_case__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) snake_case__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) snake_case__ : str = "stabilityai/stable-diffusion-2-inpainting" snake_case__ : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(__A , safety_checker=__A ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() snake_case__ : Union[str, Any] = "Face of a yellow cat, high resolution, sitting on a park bench" snake_case__ : Tuple = torch.manual_seed(0 ) snake_case__ : int = pipe( prompt=__A , image=__A , mask_image=__A , generator=__A , output_type="np" , ) snake_case__ : Dict = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 9e-3 def _lowercase ( self : Dict ): snake_case__ : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) snake_case__ : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) snake_case__ : str = load_numpy( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) snake_case__ : int = "stabilityai/stable-diffusion-2-inpainting" snake_case__ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained( __A , torch_dtype=torch.floataa , safety_checker=__A , ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing() snake_case__ : List[str] = "Face of a yellow cat, high resolution, sitting on a park bench" snake_case__ : Optional[Any] = torch.manual_seed(0 ) snake_case__ : Union[str, Any] = pipe( prompt=__A , image=__A , mask_image=__A , generator=__A , output_type="np" , ) snake_case__ : Any = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert np.abs(expected_image - image ).max() < 5e-1 def _lowercase ( self : List[Any] ): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case__ : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) snake_case__ : Any = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) snake_case__ : Union[str, Any] = "stabilityai/stable-diffusion-2-inpainting" snake_case__ : List[str] = PNDMScheduler.from_pretrained(__A , subfolder="scheduler" ) snake_case__ : Tuple = StableDiffusionInpaintPipeline.from_pretrained( __A , safety_checker=__A , scheduler=__A , torch_dtype=torch.floataa , ) pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() snake_case__ : Union[str, Any] = "Face of a yellow cat, high resolution, sitting on a park bench" snake_case__ : Tuple = torch.manual_seed(0 ) snake_case__ : List[Any] = pipe( prompt=__A , image=__A , mask_image=__A , generator=__A , num_inference_steps=2 , output_type="np" , ) snake_case__ : Tuple = 
torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.6_5 * 1_0**9
705
# Lint as: python3 import sys from collections.abc import Mapping from typing import TYPE_CHECKING, Dict, Optional import numpy as np import pyarrow as pa from .. import config from ..utils.logging import get_logger from ..utils.py_utils import map_nested from .formatting import TensorFormatter if TYPE_CHECKING: import jax import jaxlib __lowerCamelCase : Optional[int] = get_logger() __lowerCamelCase : Optional[dict] = None class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ): """simple docstring""" def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ): super().__init__(features=__A ) import jax from jaxlib.xla_client import Device if isinstance(__A , __A ): raise ValueError( f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` ''' "is not serializable neither with `pickle` nor with `dill`. Instead you can surround " "the device with `str()` to get its string identifier that will be internally mapped " "to the actual `jaxlib.xla_extension.Device`." 
) snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: snake_case__ : Any = self._map_devices_to_str() if self.device not in list(DEVICE_MAPPING.keys() ): logger.warning( f'''Device with string identifier {self.device} not listed among the available ''' f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default ''' f'''device: {str(jax.devices()[0] )}.''' ) snake_case__ : str = str(jax.devices()[0] ) snake_case__ : str = jnp_array_kwargs @staticmethod def _lowercase ( ): import jax return {str(__A ): device for device in jax.devices()} def _lowercase ( self : Optional[Any] , __A : str ): import jax import jax.numpy as jnp if isinstance(__A , __A ) and column: if all( isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ): return jnp.stack(__A , axis=0 ) return column def _lowercase ( self : int , __A : Tuple ): import jax import jax.numpy as jnp if isinstance(__A , (str, bytes, type(__A )) ): return value elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ): return value.tolist() snake_case__ : Optional[int] = {} if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ): # the default int precision depends on the jax config # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision if jax.config.jax_enable_xaa: snake_case__ : Any = {"dtype": jnp.intaa} else: snake_case__ : Tuple = {"dtype": jnp.intaa} elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ): snake_case__ : str = {"dtype": jnp.floataa} elif config.PIL_AVAILABLE and "PIL" in sys.modules: import PIL.Image if isinstance(__A , 
PIL.Image.Image ): snake_case__ : Optional[Any] = np.asarray(__A ) # using global variable since `jaxlib.xla_extension.Device` is not serializable neither # with `pickle` nor with `dill`, so we need to use a global variable instead global DEVICE_MAPPING if DEVICE_MAPPING is None: snake_case__ : int = self._map_devices_to_str() with jax.default_device(DEVICE_MAPPING[self.device] ): # calling jnp.array on a np.ndarray does copy the data # see https://github.com/google/jax/issues/4486 return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} ) def _lowercase ( self : Union[str, Any] , __A : Optional[int] ): import jax # support for torch, tf, jax etc. if config.TORCH_AVAILABLE and "torch" in sys.modules: import torch if isinstance(__A , torch.Tensor ): return self._tensorize(data_struct.detach().cpu().numpy()[()] ) if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ): snake_case__ : Union[str, Any] = data_struct.__array__() # support for nested types like struct of list of struct if isinstance(__A , np.ndarray ): if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] ) elif isinstance(__A , (list, tuple) ): return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] ) return self._tensorize(__A ) def _lowercase ( self : Tuple , __A : dict ): return map_nested(self._recursive_tensorize , __A , map_list=__A ) def _lowercase ( self : Optional[int] , __A : pa.Table ): snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A ) snake_case__ : Tuple = self.python_features_decoder.decode_row(__A ) return self.recursive_tensorize(__A ) def _lowercase ( self : Optional[Any] , __A : pa.Table ): snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A ) snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] ) snake_case__ : List[Any] = 
self.recursive_tensorize(__A ) snake_case__ : Dict = self._consolidate(__A ) return column def _lowercase ( self : str , __A : pa.Table ): snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A ) snake_case__ : int = self.python_features_decoder.decode_batch(__A ) snake_case__ : List[Any] = self.recursive_tensorize(__A ) for column_name in batch: snake_case__ : Any = self._consolidate(batch[column_name] ) return batch
25
0
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __lowerCamelCase : Optional[Any] = logging.getLogger(__name__) __lowerCamelCase : Any = list(MODEL_WITH_LM_HEAD_MAPPING.keys()) __lowerCamelCase : List[Any] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class SCREAMING_SNAKE_CASE__ : """simple docstring""" a_ = field( default=UpperCamelCase_ , metadata={ "help": ( "The model checkpoint for weights initialization. Leave None if you want to train a model from" " scratch." ) } , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase_ )} , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class SCREAMING_SNAKE_CASE__ : """simple docstring""" a_ = field( default=UpperCamelCase_ , metadata={"help": "The input training data file (a text file)."} ) a_ = field( default=UpperCamelCase_ , metadata={ "help": ( "The input training data files (multiple files in glob format). 
" "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) } , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} ) a_ = field(default=UpperCamelCase_ , metadata={"help": "Whether ot not to use whole word mask."} ) a_ = field( default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) a_ = field( default=1 / 6 , metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." ) } , ) a_ = field( default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} ) a_ = field( default=-1 , metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) } , ) a_ = field( default=UpperCamelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def SCREAMING_SNAKE_CASE ( snake_case_ : DataTrainingArguments , snake_case_ : PreTrainedTokenizer , snake_case_ : bool = False , snake_case_ : Optional[str] = None , ): def _dataset(snake_case_ : str , snake_case_ : str=None ): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" ) return LineByLineWithRefDataset( tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size , ref_path=snake_case_ , ) return LineByLineTextDataset(tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size ) else: return TextDataset( tokenizer=snake_case_ , file_path=snake_case_ , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=snake_case_ , ) if evaluate: return _dataset(args.eval_data_file , args.eval_ref_file ) elif args.train_data_files: return ConcatDataset([_dataset(snake_case_ ) for f in glob(args.train_data_files )] ) else: return _dataset(args.train_data_file , args.train_ref_file ) def SCREAMING_SNAKE_CASE ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case__ : List[Any] = parser.parse_args_into_dataclasses() if data_args.eval_data_file is None and training_args.do_eval: raise ValueError( "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file " "or remove the --do_eval argument." 
) if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use''' " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s" , snake_case_ ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. if model_args.config_name: snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: snake_case__ : List[str] = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch." 
) if model_args.tokenizer_name: snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir ) elif model_args.model_name_or_path: snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another" " script, save it,and load it from here, using --tokenizer_name" ) if model_args.model_name_or_path: snake_case__ : Optional[Any] = AutoModelWithLMHead.from_pretrained( model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , ) else: logger.info("Training new model from scratch" ) snake_case__ : Optional[int] = AutoModelWithLMHead.from_config(snake_case_ ) model.resize_token_embeddings(len(snake_case_ ) ) if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm: raise ValueError( "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the" "--mlm flag (masked language modeling)." 
) if data_args.block_size <= 0: snake_case__ : Dict = tokenizer.max_len # Our input block size will be the max possible for the model else: snake_case__ : Any = min(data_args.block_size , tokenizer.max_len ) # Get datasets snake_case__ : Optional[int] = ( get_dataset(snake_case_ , tokenizer=snake_case_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None ) snake_case__ : str = ( get_dataset(snake_case_ , tokenizer=snake_case_ , evaluate=snake_case_ , cache_dir=model_args.cache_dir ) if training_args.do_eval else None ) if config.model_type == "xlnet": snake_case__ : int = DataCollatorForPermutationLanguageModeling( tokenizer=snake_case_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , ) else: if data_args.mlm and data_args.whole_word_mask: snake_case__ : int = DataCollatorForWholeWordMask( tokenizer=snake_case_ , mlm_probability=data_args.mlm_probability ) else: snake_case__ : List[str] = DataCollatorForLanguageModeling( tokenizer=snake_case_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability ) # Initialize our Trainer snake_case__ : Optional[Any] = Trainer( model=snake_case_ , args=snake_case_ , data_collator=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , prediction_loss_only=snake_case_ , ) # Training if training_args.do_train: snake_case__ : Any = ( model_args.model_name_or_path if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ) else None ) trainer.train(model_path=snake_case_ ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case__ : Dict = {} if training_args.do_eval: logger.info("*** Evaluate ***" ) snake_case__ : Union[str, Any] = trainer.evaluate() snake_case__ : str = math.exp(eval_output["eval_loss"] ) 
snake_case__ : List[str] = {"perplexity": perplexity} snake_case__ : int = os.path.join(training_args.output_dir , "eval_results_lm.txt" ) if trainer.is_world_master(): with open(snake_case_ , "w" ) as writer: logger.info("***** Eval results *****" ) for key in sorted(result.keys() ): logger.info(" %s = %s" , snake_case_ , str(result[key] ) ) writer.write("%s = %s\n" % (key, str(result[key] )) ) results.update(snake_case_ ) return results def SCREAMING_SNAKE_CASE ( snake_case_ : Any ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
706
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __lowerCamelCase : Tuple = { """configuration_roberta_prelayernorm""": [ """ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaPreLayerNormConfig""", """RobertaPreLayerNormOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaPreLayerNormForCausalLM""", """RobertaPreLayerNormForMaskedLM""", """RobertaPreLayerNormForMultipleChoice""", """RobertaPreLayerNormForQuestionAnswering""", """RobertaPreLayerNormForSequenceClassification""", """RobertaPreLayerNormForTokenClassification""", """RobertaPreLayerNormModel""", """RobertaPreLayerNormPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Union[str, Any] = [ """TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaPreLayerNormForCausalLM""", """TFRobertaPreLayerNormForMaskedLM""", """TFRobertaPreLayerNormForMultipleChoice""", """TFRobertaPreLayerNormForQuestionAnswering""", """TFRobertaPreLayerNormForSequenceClassification""", """TFRobertaPreLayerNormForTokenClassification""", """TFRobertaPreLayerNormMainLayer""", """TFRobertaPreLayerNormModel""", """TFRobertaPreLayerNormPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """FlaxRobertaPreLayerNormForCausalLM""", """FlaxRobertaPreLayerNormForMaskedLM""", """FlaxRobertaPreLayerNormForMultipleChoice""", """FlaxRobertaPreLayerNormForQuestionAnswering""", """FlaxRobertaPreLayerNormForSequenceClassification""", """FlaxRobertaPreLayerNormForTokenClassification""", 
"""FlaxRobertaPreLayerNormModel""", """FlaxRobertaPreLayerNormPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaPreLayerNormConfig, RobertaPreLayerNormOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta_prelayernorm import ( ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaPreLayerNormForCausalLM, RobertaPreLayerNormForMaskedLM, RobertaPreLayerNormForMultipleChoice, RobertaPreLayerNormForQuestionAnswering, RobertaPreLayerNormForSequenceClassification, RobertaPreLayerNormForTokenClassification, RobertaPreLayerNormModel, RobertaPreLayerNormPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta_prelayernorm import ( TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaPreLayerNormForCausalLM, TFRobertaPreLayerNormForMaskedLM, TFRobertaPreLayerNormForMultipleChoice, TFRobertaPreLayerNormForQuestionAnswering, TFRobertaPreLayerNormForSequenceClassification, TFRobertaPreLayerNormForTokenClassification, TFRobertaPreLayerNormMainLayer, TFRobertaPreLayerNormModel, TFRobertaPreLayerNormPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormPreTrainedModel, ) else: import sys __lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
0
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ): snake_case__ : Optional[int] = [] for part_id in partition_order: snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(snake_case_ ): expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 ) snake_case__ : Any = Spark(snake_case_ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 ) snake_case__ : Optional[Any] = [1, 0] snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions. 
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): snake_case__ : Tuple = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Optional[int] = spark.range(10 ).repartition(1 ) snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(snake_case_ ): assert row_id == F'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : str = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("numpy.random.Generator" ) as generator_mock: snake_case__ : Union[str, Any] = lambda snake_case_ : x.reverse() snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] ) snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(snake_case_ ): snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Tuple = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] ) for i, (row_id, row_dict) in enumerate(snake_case_ ): snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] ) for i, (row_id, row_dict) in enumerate(snake_case_ ): snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def SCREAMING_SNAKE_CASE ( ): snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() snake_case__ : Tuple = spark.range(100 ).repartition(1 ) snake_case__ : Union[str, Any] = 
Spark(snake_case_ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
707
import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.activations import gelu_new, gelu_python, get_activation @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def _lowercase ( self : Tuple ): snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) snake_case__ : Tuple = get_activation("gelu" ) self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) ) self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) ) def _lowercase ( self : Dict ): snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] ) snake_case__ : Union[str, Any] = get_activation("gelu" ) snake_case__ : int = get_activation("gelu_10" ) snake_case__ : Optional[int] = torch_builtin(__A ) snake_case__ : Dict = geluaa(__A ) snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 ) self.assertTrue(torch.max(__A ).item() == 1_0.0 ) self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) ) def _lowercase ( self : str ): get_activation("gelu" ) get_activation("gelu_10" ) get_activation("gelu_fast" ) get_activation("gelu_new" ) get_activation("gelu_python" ) get_activation("gelu_pytorch_tanh" ) get_activation("linear" ) get_activation("mish" ) get_activation("quick_gelu" ) get_activation("relu" ) get_activation("sigmoid" ) get_activation("silu" ) get_activation("swish" ) get_activation("tanh" ) with self.assertRaises(__A ): get_activation("bogus" ) with self.assertRaises(__A ): get_activation(__A ) def _lowercase ( self : List[str] ): snake_case__ : List[str] = get_activation("gelu" ) snake_case__ : Any = 1 snake_case__ : Union[str, Any] = get_activation("gelu" ) self.assertEqual(acta.a , 1 ) with self.assertRaises(__A ): snake_case__ : int = acta.a
25
0
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Optional[Any] , __A : List[Any] , __A : Optional[int]=1_3 , __A : Optional[int]=7 , __A : Optional[Any]=True , __A : int=True , __A : Optional[Any]=True , __A : Dict=True , __A : Tuple=9_9 , __A : Optional[Any]=1_6 , __A : Dict=3_6 , __A : Optional[Any]=6 , __A : List[Any]=6 , __A : Tuple=6 , __A : Any=3_7 , __A : Optional[Any]="gelu" , __A : Tuple=0.1 , __A : int=0.1 , __A : List[Any]=5_1_2 , __A : Tuple=1_6 , __A : List[Any]=2 , __A : Any=0.0_2 , __A : int=3 , __A : Any=4 , __A : int=None , ): snake_case__ : Dict = parent snake_case__ : Optional[Any] = batch_size snake_case__ : List[str] = seq_length snake_case__ : List[str] = is_training snake_case__ : Optional[int] = use_input_mask snake_case__ : List[Any] = use_token_type_ids snake_case__ : List[Any] = use_labels snake_case__ : Optional[Any] = vocab_size snake_case__ : Optional[int] = embedding_size snake_case__ : Optional[Any] = hidden_size snake_case__ : Optional[int] = num_hidden_layers snake_case__ : List[Any] = num_hidden_groups snake_case__ : Any = num_attention_heads snake_case__ : int = intermediate_size snake_case__ : str = hidden_act snake_case__ : Dict = hidden_dropout_prob snake_case__ : 
List[Any] = attention_probs_dropout_prob snake_case__ : Dict = max_position_embeddings snake_case__ : Tuple = type_vocab_size snake_case__ : int = type_sequence_label_size snake_case__ : int = initializer_range snake_case__ : List[str] = num_labels snake_case__ : Optional[int] = num_choices snake_case__ : Tuple = scope def _lowercase ( self : List[Any] ): snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Dict = None if self.use_input_mask: snake_case__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : int = None if self.use_token_type_ids: snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) snake_case__ : Union[str, Any] = None snake_case__ : List[Any] = None snake_case__ : str = None if self.use_labels: snake_case__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ : int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Optional[Any] ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def _lowercase ( self : Dict , __A : Dict , __A : List[Any] , __A : Tuple , __A : Tuple , __A : Union[str, Any] , __A : Dict , __A : str ): snake_case__ : List[str] = 
AlbertModel(config=__A ) model.to(__A ) model.eval() snake_case__ : Dict = model(__A , attention_mask=__A , token_type_ids=__A ) snake_case__ : List[str] = model(__A , token_type_ids=__A ) snake_case__ : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase ( self : Any , __A : Dict , __A : Optional[Any] , __A : Optional[Any] , __A : Dict , __A : Union[str, Any] , __A : Optional[int] , __A : Dict ): snake_case__ : List[Any] = AlbertForPreTraining(config=__A ) model.to(__A ) model.eval() snake_case__ : str = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , sentence_order_label=__A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def _lowercase ( self : List[Any] , __A : int , __A : str , __A : Optional[Any] , __A : str , __A : List[str] , __A : int , __A : str ): snake_case__ : Any = AlbertForMaskedLM(config=__A ) model.to(__A ) model.eval() snake_case__ : Union[str, Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : List[str] , __A : int , __A : int , __A : Optional[int] , __A : Optional[int] , __A : Dict , __A : str , __A : int ): snake_case__ : List[str] = AlbertForQuestionAnswering(config=__A ) model.to(__A ) model.eval() snake_case__ : List[Any] = model( __A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : Optional[Any] , __A : 
Tuple , __A : Any , __A : Tuple , __A : Optional[int] , __A : Dict , __A : int , __A : Any ): snake_case__ : Any = self.num_labels snake_case__ : str = AlbertForSequenceClassification(__A ) model.to(__A ) model.eval() snake_case__ : Union[str, Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Optional[Any] , __A : Union[str, Any] , __A : Optional[Any] , __A : List[str] , __A : str , __A : Union[str, Any] , __A : int , __A : Union[str, Any] ): snake_case__ : List[str] = self.num_labels snake_case__ : Optional[int] = AlbertForTokenClassification(config=__A ) model.to(__A ) model.eval() snake_case__ : Tuple = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : str , __A : Optional[int] , __A : Any , __A : Tuple , __A : Union[str, Any] , __A : Optional[Any] , __A : str , __A : int ): snake_case__ : str = self.num_choices snake_case__ : Union[str, Any] = AlbertForMultipleChoice(config=__A ) model.to(__A ) model.eval() snake_case__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ : Optional[Any] = model( __A , attention_mask=__A , token_type_ids=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : Optional[Any] ): snake_case__ : Union[str, Any] = self.prepare_config_and_inputs() ( snake_case__ ) : Union[str, Any] = config_and_inputs snake_case__ : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class 
SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) a_ = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) a_ = True def _lowercase ( self : Union[str, Any] , __A : List[str] , __A : Tuple , __A : Union[str, Any]=False ): snake_case__ : Optional[Any] = super()._prepare_for_class(__A , __A , return_labels=__A ) if return_labels: if model_class in get_values(__A ): snake_case__ : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A ) snake_case__ : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__A ) return inputs_dict def _lowercase ( self : Union[str, Any] ): snake_case__ : int = AlbertModelTester(self ) snake_case__ : int = ConfigTester(self , config_class=__A , hidden_size=3_7 ) def _lowercase ( self : str ): self.config_tester.run_common_tests() def _lowercase ( self : Union[str, Any] ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__A ) def _lowercase ( self : List[Any] ): snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__A ) def _lowercase ( self : List[str] ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__A ) def _lowercase ( self : List[Any] ): snake_case__ : List[str] = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*__A ) def _lowercase ( self : Tuple ): snake_case__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*__A ) def _lowercase ( self : Dict ): snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*__A ) def _lowercase ( self : Union[str, Any] ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: snake_case__ : List[str] = type self.model_tester.create_and_check_model(*__A ) @slow def _lowercase ( self : str ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : str = AlbertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Any ): snake_case__ : Optional[int] = AlbertModel.from_pretrained("albert-base-v2" ) snake_case__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) snake_case__ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case__ : Any = model(__A , attention_mask=__A )[0] snake_case__ : str = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , __A ) snake_case__ : List[str] = torch.tensor( [[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
708
"""Convert a fairseq UniSpeechSat checkpoint into the Hugging Face Transformers format."""
import argparse

import fairseq
import torch

from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# Maps a fairseq state-dict key fragment to the HF module path it should be loaded into.
# A "*" in the target is replaced by the transformer layer index extracted from the key.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "encoder.layer_norm_for_extract": "layer_norm_for_extract",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "label_embs_concat": "label_embeddings_concat",
    "mask_emb": "masked_spec_embed",
    "spk_proj": "speaker_proj",
}

# HF modules that live at the top level of the model and therefore do NOT get the
# "unispeech_sat." prefix when resolving a mapped key.
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "label_embeddings_concat",
    "speaker_proj",
    "layer_norm_for_extract",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk ``key`` ("a.b.c") on ``hf_pointer`` and copy ``value`` into the resolved parameter.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path of the target submodule.
        value: tensor from the fairseq state dict.
        full_name: original fairseq key (used only for logging / error messages).
        weight_type: one of "weight", "weight_g", "weight_v", "bias" or None for a raw tensor.

    Raises:
        ValueError: if the destination parameter's shape does not match ``value``.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model):
    """Copy every tensor of ``fairseq_model``'s state dict into ``hf_model``.

    Conv feature-extractor tensors are routed through ``load_conv_layer``; everything
    else is matched against ``MAPPING``. Keys that match nothing are collected and
    reported with a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched fragment in the key.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor, validating its shape first.

    ``full_name`` looks like "...conv_layers.<layer_id>.<type_id>...."; type_id 0 is the
    conv itself, type_id 2 a layer norm (only present on layer 0 for group norm).
    Anything else is recorded in ``unused_weights``.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            # Bug fix: the original error message indexed `feature_extractor[layer_id]`,
            # which raises TypeError instead of reporting the mismatched shape.
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq model's weights into the Transformers design and save it."""
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
25
0
# NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401 deprecate( """stable diffusion controlnet""", """0.22.0""", """Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""", standard_warn=False, stacklevel=3, )
709
"""Tests for the MaMaaa (M2M100) PyTorch model."""
import copy
import tempfile
import unittest

from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
    from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder


def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict for a MaMaaa forward pass, deriving any mask not supplied."""
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        # NOTE(review): the encoder mask is reused for the decoder here, matching the
        # original file's behavior — the derived decoder_attention_mask is unused.
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


class MaMaaaModelTester:
    """Builds tiny MaMaaa configs/inputs and implements the shared model checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        """Check that decoding with a cache matches decoding the full sequence."""
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Check that the encoder and decoder round-trip through save/load on their own."""
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)


@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False

    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)


def _long_tensor(tok_lst):
    """Create an int64 tensor on the test device from a nested token list."""
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
25
0
"""BigBird model configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
    "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
    "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
    # See all BigBird models at https://huggingface.co/models?filter=big_bird
}


class BigBirdConfig(PretrainedConfig):
    """Configuration class holding the hyper-parameters of a BigBird model.

    Defaults reproduce a `google/bigbird-roberta-base`-like architecture. The
    token-id and ``sep_token_id`` defaults are forwarded to ``PretrainedConfig``.
    """

    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        # BigBird-specific attention settings.
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Declare the dynamic axes of the ONNX graph inputs for each task."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
710
"""Tests for the Spark packaged module of `datasets`."""
from unittest.mock import patch

import pyspark

from datasets.packaged_modules.spark.spark import (
    Spark,
    SparkExamplesIterable,
    _generate_iterable_examples,
)

from ..utils import (
    require_dill_gt_0_3_2,
    require_not_windows,
)


def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect (row_id, row_dict) pairs for ``df`` in the given partition order.

    Row ids follow the "<partition>_<row_index>" convention used by the Spark builder.
    """
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
25
0
import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" pass @nightly @require_onnxruntime @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @property def _lowercase ( self : List[Any] ): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def _lowercase ( self : Union[str, Any] ): snake_case__ : Optional[Any] = ort.SessionOptions() snake_case__ : Dict = False return options def _lowercase ( self : Optional[Any] ): snake_case__ : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) snake_case__ : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) snake_case__ : List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) snake_case__ : Dict = "A red cat sitting on a park bench" snake_case__ : int = np.random.RandomState(0 ) snake_case__ : List[Any] = pipe( prompt=__A , image=__A , mask_image=__A , guidance_scale=7.5 , num_inference_steps=1_0 , generator=__A , output_type="np" , ) snake_case__ : List[Any] = output.images snake_case__ : Tuple = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) 
snake_case__ : int = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def _lowercase ( self : Optional[Any] ): snake_case__ : Optional[int] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) snake_case__ : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) snake_case__ : Dict = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" ) snake_case__ : Optional[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=__A , safety_checker=__A , feature_extractor=__A , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=__A ) snake_case__ : Tuple = "A red cat sitting on a park bench" snake_case__ : Union[str, Any] = np.random.RandomState(0 ) snake_case__ : Optional[Any] = pipe( prompt=__A , image=__A , mask_image=__A , guidance_scale=7.5 , num_inference_steps=2_0 , generator=__A , output_type="np" , ) snake_case__ : Optional[int] = output.images snake_case__ : Any = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1] assert images.shape == (1, 5_1_2, 5_1_2, 3) snake_case__ : List[str] = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
711
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = ["""XLNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""XLNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : str = [ """XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLNetForMultipleChoice""", """XLNetForQuestionAnswering""", """XLNetForQuestionAnsweringSimple""", """XLNetForSequenceClassification""", """XLNetForTokenClassification""", """XLNetLMHeadModel""", """XLNetModel""", """XLNetPreTrainedModel""", """load_tf_weights_in_xlnet""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = [ """TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLNetForMultipleChoice""", """TFXLNetForQuestionAnsweringSimple""", """TFXLNetForSequenceClassification""", """TFXLNetForTokenClassification""", """TFXLNetLMHeadModel""", """TFXLNetMainLayer""", """TFXLNetModel""", """TFXLNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlnet import XLNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: 
from .tokenization_xlnet_fast import XLNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) else: import sys __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
25
0
import os import tempfile import unittest from transformers import DistilBertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, ) class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" def __init__( self : Optional[Any] , __A : List[str] , __A : Dict=1_3 , __A : str=7 , __A : int=True , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : Tuple=True , __A : Any=9_9 , __A : Any=3_2 , __A : Dict=5 , __A : Optional[int]=4 , __A : Tuple=3_7 , __A : Dict="gelu" , __A : Dict=0.1 , __A : Any=0.1 , __A : List[Any]=5_1_2 , __A : Union[str, Any]=1_6 , __A : Any=2 , __A : List[str]=0.0_2 , __A : Optional[Any]=3 , __A : Union[str, Any]=4 , __A : Tuple=None , ): snake_case__ : Tuple = parent snake_case__ : Optional[Any] = batch_size snake_case__ : Optional[Any] = seq_length snake_case__ : str = is_training snake_case__ : Any = use_input_mask snake_case__ : Tuple = use_token_type_ids snake_case__ : Optional[int] = use_labels snake_case__ : Optional[int] = vocab_size snake_case__ : Any = hidden_size snake_case__ : List[str] = num_hidden_layers snake_case__ : Optional[Any] = num_attention_heads snake_case__ : Dict = intermediate_size snake_case__ : Dict = hidden_act snake_case__ : List[str] = hidden_dropout_prob snake_case__ : int = attention_probs_dropout_prob snake_case__ : Optional[int] = max_position_embeddings snake_case__ : int = type_vocab_size snake_case__ : int = type_sequence_label_size snake_case__ : Dict = 
initializer_range snake_case__ : List[Any] = num_labels snake_case__ : str = num_choices snake_case__ : Tuple = scope def _lowercase ( self : Union[str, Any] ): snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) snake_case__ : Dict = None if self.use_input_mask: snake_case__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) snake_case__ : Union[str, Any] = None snake_case__ : Tuple = None snake_case__ : Union[str, Any] = None if self.use_labels: snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) snake_case__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) snake_case__ : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowercase ( self : Optional[Any] ): return DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def _lowercase ( self : Any , __A : Optional[int] , __A : str , __A : List[str] , __A : Tuple , __A : Union[str, Any] , __A : Any ): snake_case__ : List[Any] = DistilBertModel(config=__A ) model.to(__A ) model.eval() snake_case__ : str = model(__A , __A ) snake_case__ : Tuple = model(__A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : Optional[Any] , __A : List[str] , __A : int , __A : List[Any] , __A : str , __A : int , __A : int ): snake_case__ : str = DistilBertForMaskedLM(config=__A ) model.to(__A ) model.eval() snake_case__ : List[Any] = 
model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase ( self : List[str] , __A : Tuple , __A : Optional[int] , __A : Optional[int] , __A : Tuple , __A : Dict , __A : Any ): snake_case__ : str = DistilBertForQuestionAnswering(config=__A ) model.to(__A ) model.eval() snake_case__ : Dict = model( __A , attention_mask=__A , start_positions=__A , end_positions=__A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase ( self : int , __A : str , __A : Optional[Any] , __A : Optional[Any] , __A : Optional[int] , __A : List[Any] , __A : Dict ): snake_case__ : Optional[Any] = self.num_labels snake_case__ : Dict = DistilBertForSequenceClassification(__A ) model.to(__A ) model.eval() snake_case__ : List[str] = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : Tuple , __A : Dict , __A : Optional[Any] , __A : int , __A : List[str] , __A : Tuple , __A : Any ): snake_case__ : Optional[Any] = self.num_labels snake_case__ : Tuple = DistilBertForTokenClassification(config=__A ) model.to(__A ) model.eval() snake_case__ : Tuple = model(__A , attention_mask=__A , labels=__A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase ( self : List[Any] , __A : str , __A : str , __A : Dict , __A : Optional[Any] , __A : Any , __A : Tuple ): snake_case__ : Union[str, Any] = self.num_choices snake_case__ : List[Any] = DistilBertForMultipleChoice(config=__A ) model.to(__A ) model.eval() snake_case__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() snake_case__ 
: List[Any] = model( __A , attention_mask=__A , labels=__A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def _lowercase ( self : str ): snake_case__ : List[str] = self.prepare_config_and_inputs() (snake_case__) : Optional[Any] = config_and_inputs snake_case__ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = ( ( DistilBertModel, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, ) if is_torch_available() else None ) a_ = ( { "feature-extraction": DistilBertModel, "fill-mask": DistilBertForMaskedLM, "question-answering": DistilBertForQuestionAnswering, "text-classification": DistilBertForSequenceClassification, "token-classification": DistilBertForTokenClassification, "zero-shot": DistilBertForSequenceClassification, } if is_torch_available() else {} ) a_ = True a_ = True a_ = True a_ = True def _lowercase ( self : Tuple ): snake_case__ : List[Any] = DistilBertModelTester(self ) snake_case__ : str = ConfigTester(self , config_class=__A , dim=3_7 ) def _lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def _lowercase ( self : Any ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*__A ) def _lowercase ( self : int ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_masked_lm(*__A ) def _lowercase ( self : List[str] ): snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*__A ) def _lowercase ( self : str ): snake_case__ : Any = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__A ) def _lowercase ( self : Union[str, Any] ): snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*__A ) def _lowercase ( self : str ): snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*__A ) @slow def _lowercase ( self : List[Any] ): for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case__ : List[str] = DistilBertModel.from_pretrained(__A ) self.assertIsNotNone(__A ) @slow @require_torch_gpu def _lowercase ( self : Dict ): snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # BertForMultipleChoice behaves incorrectly in JIT environments. if model_class == DistilBertForMultipleChoice: return snake_case__ : Any = True snake_case__ : int = model_class(config=__A ) snake_case__ : Tuple = self._prepare_for_class(__A , __A ) snake_case__ : Any = torch.jit.trace( __A , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(__A , os.path.join(__A , "traced_model.pt" ) ) snake_case__ : int = torch.jit.load(os.path.join(__A , "traced_model.pt" ) , map_location=__A ) loaded(inputs_dict["input_ids"].to(__A ) , inputs_dict["attention_mask"].to(__A ) ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" @slow def _lowercase ( self : Optional[Any] ): snake_case__ : List[Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" ) snake_case__ : int = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) snake_case__ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): snake_case__ : Optional[Any] = model(__A , attention_mask=__A )[0] 
snake_case__ : Any = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , __A ) snake_case__ : Union[str, Any] = torch.tensor( [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
712
# Kandinsky pipeline exports. The real implementations are imported only when
# both `transformers` and `torch` are installed; otherwise dummy placeholder
# objects are exposed so that `from diffusers import KandinskyPipeline` still
# succeeds and raises a helpful error at use time instead of import time.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    # Probe for the optional heavy dependencies.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy objects from the shared placeholder module.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    # Dependencies present: expose the real pipeline classes.
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
    """Return the number of set bits (1s) in the binary representation of a
    non-negative integer.

    Args:
        snake_case_: the value whose population count is wanted.

    Returns:
        The count of ``1`` digits in ``bin(snake_case_)``.

    Raises:
        TypeError: if the input is not an ``int``.
        ValueError: if the input is negative.
    """
    # Type check must come first: comparing a non-numeric value with 0 could
    # itself raise.  The original wrote `isinstance(snake_case_, snake_case_)`
    # (the value passed as the *type* argument), which made isinstance() raise
    # TypeError for every valid integer input -- compare against `int` instead.
    if not isinstance(snake_case_, int):
        raise TypeError("Input value must be a 'int' type")
    if snake_case_ < 0:
        raise ValueError("Input value must be a positive integer")
    # bin(25) == '0b11001'; counting '1' characters yields the popcount.
    return bin(snake_case_).count("1")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
713
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


# Fix: the original defined all three functions under one name
# (`SCREAMING_SNAKE_CASE`), so the later definitions shadowed the earlier
# ones, while the call sites referenced `data_handling`, `xgboost`, and
# `main` -- names that were never defined.  Restore the names the call
# sites demand so the module can actually run.
def data_handling(data: dict):
    """Return the (features, targets) pair from a scikit-learn dataset dict."""
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    """Fit an XGBoost classifier on the given training data and return it."""
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    """Train on the iris dataset and display a normalized confusion matrix."""
    # Load the bundled iris dataset and split out features/targets.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]

    # Create an XGBoost classifier from the training data.
    xgb_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix, normalized per true class.
    ConfusionMatrixDisplay.from_estimator(
        xgb_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
25
0
# Lazy-loading __init__ for the Reformer model family: heavy submodules are
# imported only on first attribute access via `_LazyModule`.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


# NOTE(review): in the upstream pattern these assignments populate a single
# `_import_structure` dict keyed by submodule; here every assignment rebinds
# the same `__lowerCamelCase` name, yet `_import_structure` is still
# referenced at the bottom of the module and is never defined -- confirm
# against the original file before relying on this module.
__lowerCamelCase : Tuple = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}

try:
    # Slow (SentencePiece-based) tokenizer is optional.
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[str] = ["""ReformerTokenizer"""]

try:
    # Fast (tokenizers-based) tokenizer is optional.
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : Union[str, Any] = ["""ReformerTokenizerFast"""]

try:
    # PyTorch modeling code is optional.
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCamelCase : List[str] = [
        """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ReformerAttention""",
        """ReformerForMaskedLM""",
        """ReformerForQuestionAnswering""",
        """ReformerForSequenceClassification""",
        """ReformerLayer""",
        """ReformerModel""",
        """ReformerModelWithLMHead""",
        """ReformerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    # At runtime, replace this module object with a lazy proxy.
    import sys

    __lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
714
import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ): snake_case__ : Tuple = args.log_outputs snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric snake_case__ : List[str] = load_metric("wer" ) snake_case__ : List[str] = load_metric("cer" ) # compute metrics snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] ) snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}''' print(snake_case_ ) with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f: f.write(snake_case_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt''' snake_case__ : int = F'''log_{dataset_id}_targets.txt''' with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t: # mapping function to write output def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ): p.write(F'''{i}''' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(F'''{i}''' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case_ , with_indices=snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
snake_case__ : Optional[Any] = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) ) return text def SCREAMING_SNAKE_CASE ( snake_case_ : int ): # load dataset snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id ) snake_case__ : List[Any] = feature_extractor.sampling_rate # resample audio snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) ) # load eval pipeline if args.device is None: snake_case__ : int = 0 if torch.cuda.is_available() else -1 snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case_ : Any ): snake_case__ : Union[str, Any] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) snake_case__ : Optional[int] = prediction["text"] snake_case__ : Optional[Any] = normalize_text(batch["sentence"] ) return batch # run inference on all examples snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case_ , snake_case_ ) if __name__ == "__main__": __lowerCamelCase : Dict = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. 
*E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) __lowerCamelCase : str = parser.parse_args() main(args)
25
0
from __future__ import annotations


def SCREAMING_SNAKE_CASE(nums: list[int], target: int) -> list[int]:
    """Return indices ``[i, j]`` (``i < j``) of two values in the *sorted
    ascending* list ``nums`` whose sum equals ``target``, or ``[]`` if no
    such pair exists.

    Classic two-pointer scan: O(n) time, O(1) extra space.

    >>> SCREAMING_SNAKE_CASE([2, 7, 11, 15], 9)
    [0, 1]
    >>> SCREAMING_SNAKE_CASE([1, 2, 3], 100)
    []
    """
    # Fix: the original declared both parameters with the same name (a
    # SyntaxError) and assigned the moving pointers to throwaway names while
    # the loop read `nums`, `target`, `i`, `j`; reconstructed from those
    # body references.
    i = 0
    j = len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            # Sum too small: advance the low pointer to a larger value.
            i += 1
        else:
            # Sum too large: retreat the high pointer to a smaller value.
            j -= 1
    return []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original printed `two_pointer(...)`, a name never defined here.
    print(f"{SCREAMING_SNAKE_CASE([2, 7, 11, 15], 9) = }")
715
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


# NOTE(review): `frozen=UpperCamelCase_` passes a class where a bool is
# expected -- upstream this is almost certainly `frozen=True`; confirm.
@dataclass(frozen=UpperCamelCase_ )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
    """Task template describing a text-classification dataset layout.

    Declares an input schema of one string column ("text") and a label
    schema of one ClassLabel column ("labels"), plus the names of the
    source-dataset columns that map onto them.
    """

    # Task identifier; the metadata flag keeps it in asdict() output even
    # when it equals the default.
    a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    # Expected input schema: a single string column.
    a_ = Features({"text": Value("string" )} )
    # Expected label schema: a ClassLabel column.
    a_ = Features({"labels": ClassLabel} )
    # Default column names in the source dataset.
    a_ = "text"
    a_ = "labels"

    def _lowercase ( self : Tuple , __A : List[Any] ):
        """Return a deep copy of this template with its label schema aligned
        to the given features mapping.

        Raises:
            ValueError: if the label column is missing from the features, or
                is not a ClassLabel.
        """
        # NOTE(review): the body reads `features`, `label_schema`, and
        # `task_template`, none of which are bound here -- the parameter was
        # renamed to `__A` without updating the body, and the sequential
        # assignments all target the same throwaway name.  As written this
        # method raises NameError; reconcile with the upstream original
        # (`align_with_features`) before use.
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , __A ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        snake_case__ : Any = copy.deepcopy(self )
        snake_case__ : Optional[Any] = self.label_schema.copy()
        snake_case__ : List[str] = features[self.label_column]
        snake_case__ : Dict = label_schema
        return task_template

    # NOTE(review): this property shadows the method above (same name) --
    # upstream these are two distinct members (`align_with_features` and
    # `column_mapping`); confirm against the original file.
    @property
    def _lowercase ( self : Tuple ):
        """Mapping from source-dataset column names to the template's
        canonical "text"/"labels" names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
25
0
from typing import Callable, Optional

from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream


class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
    """Dataset input stream backed by a user-supplied generator callable.

    Wraps the packaged `Generator` builder and exposes it through the
    AbstractDatasetInputStream interface (streaming or map-style).
    """

    # NOTE(review): every parameter after `self` is named `__A`, which is a
    # SyntaxError (duplicate argument names) -- the obfuscation collapsed the
    # original distinct parameters (generator, features, cache_dir,
    # keep_in_memory, streaming, gen_kwargs, num_proc).  Reconcile with the
    # upstream original before use.
    def __init__( self : Tuple , __A : Callable , __A : Optional[Features] = None , __A : str = None , __A : bool = False , __A : bool = False , __A : Optional[dict] = None , __A : Optional[int] = None , **__A : int , ):
        # Forward the shared stream options to the base class.
        super().__init__(
            features=__A , cache_dir=__A , keep_in_memory=__A , streaming=__A , num_proc=__A , **__A , )
        # Builder that materializes the generator's rows into a dataset.
        snake_case__ : Tuple = Generator(
            cache_dir=__A , features=__A , generator=__A , gen_kwargs=__A , **__A , )

    def _lowercase ( self : Any ):
        """Build and return the dataset (iterable when streaming, map-style
        otherwise).

        NOTE(review): the assignments below all target a throwaway name and
        the final `return dataset` is unbound -- as written this raises
        NameError; upstream this method assigns `dataset` and passes distinct
        download/verification arguments.  Verify against the original.
        """
        # Build iterable dataset
        if self.streaming:
            snake_case__ : Optional[int] = self.builder.as_streaming_dataset(split="train" )
        # Build regular (map-style) dataset
        else:
            snake_case__ : Any = None
            snake_case__ : Union[str, Any] = None
            snake_case__ : Optional[Any] = None
            snake_case__ : str = None
            self.builder.download_and_prepare(
                download_config=__A , download_mode=__A , verification_mode=__A , base_path=__A , num_proc=self.num_proc , )
            snake_case__ : Optional[Any] = self.builder.as_dataset(
                split="train" , verification_mode=__A , in_memory=self.keep_in_memory )
        return dataset
716
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    """Configuration for the InstructBLIP vision encoder (a ViT-style image encoder)."""

    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the vision sub-config, unwrapping it from a full InstructBlip config if necessary."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    """Configuration for the InstructBLIP Q-Former (BERT-like cross-attention module)."""

    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        """Load the Q-Former sub-config, unwrapping it from a full InstructBlip config if necessary."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    """Composite configuration holding the vision, Q-Former and language-model sub-configs."""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        # The language model can be any architecture registered in CONFIG_MAPPING; default is OPT.
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # Q-Former cross-attends over the vision encoder's hidden states.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        """Instantiate a composite config from the three already-built sub-configs."""
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        """Serialize, expanding each sub-config into its own nested dict."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
25
0
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu


class SafeDiffusionPipelineFastTests(unittest.TestCase):
    """Fast CPU tests for the safe Stable Diffusion pipeline, using tiny dummy models."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        # Stand-in feature extractor: returns an object with empty pixel_values.
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_safe_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5756, 0.6118, 0.5005, 0.5041, 0.5471, 0.4726, 0.4976, 0.4865, 0.4864])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        # make sure here that pndm scheduler skips prk
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5125, 0.5716, 0.4828, 0.5060, 0.5650, 0.4768, 0.5185, 0.4895, 0.4993])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_no_safety_checker(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None
        )
        assert isinstance(pipe, StableDiffusionPipeline)
        assert isinstance(pipe.scheduler, LMSDiscreteScheduler)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = StableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images
        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests comparing outputs with and without safe latent diffusion guidance."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_harm_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
            " coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
            " anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
            " children from bahnhof zoo, detailed "
        )
        seed = 4003660346
        guidance_scale = 7

        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "padme amidala taking a bath artwork, safe for work, no nudity"
        seed = 2734971755
        guidance_scale = 7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_nudity_safetychecker_safe_stable_diffusion(self):
        # keep the default safety checker enabled for this configuration
        sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = (
            "the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
            " leyendecker"
        )
        seed = 1044355234
        guidance_scale = 12

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        # safety checker blacks out the image entirely without safe guidance
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7

        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type="np",
            width=512,
            height=512,
            sld_guidance_scale=2000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
717
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place with gnome sort and return it.

    Works on any list of mutually comparable items; lists of length <= 1
    are returned unchanged. Average/worst-case time is O(n^2).
    """
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # out of order: swap and step back to re-check the previous pair
            lst[i], lst[i - 1] = lst[i - 1], lst[i]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
25
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import table: maps submodule name -> public names it provides.
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only exposed when torch is installed.
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_van import (
            VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
            VanForImageClassification,
            VanModel,
            VanPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
718
from __future__ import annotations

import time

Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right


class Node:
    """A search-tree node holding a grid position and a link to its parent."""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # coordinates are stored (y, x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    """Plain BFS over `grid` from `start` to `goal` (both given as (y, x))."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        """Run BFS; return the path start->goal, or [start] if the goal is unreachable."""
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            for node in self.get_successors(current_node):
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, obstacle-free neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Walk parent links back to the root and return the path in start->node order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    """Two simultaneous BFS frontiers (from start and from goal) that stop when they meet."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            # each frontier chases the other's most recent node
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        """Join the two half-paths at the meeting node (dropping the duplicate)."""
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()  # the meeting node already ends fwd_path
        bwd_path.reverse()
        return fwd_path + bwd_path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    import doctest

    doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("Bidirectional BFS computation time : ", bd_bfs_time)
25
0
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class DDPMSchedulerState:
    """Immutable scheduler state carried between calls (jit-friendly pytree)."""

    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax implementation of the DDPM (denoising diffusion probabilistic models) scheduler."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Build the initial scheduler state (betas/alphas live in `common`)."""
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )

    def scale_model_input(
        self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None
    ) -> jnp.ndarray:
        """DDPM needs no input scaling; return the sample unchanged."""
        return sample

    def set_timesteps(
        self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()
    ) -> DDPMSchedulerState:
        """Return a new state whose timesteps are subsampled for `num_inference_steps` denoising steps."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """Predict the sample at the previous timestep by reversing the SDE (one DDPM step)."""
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
719
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ConditionalDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Dict = parent snake_case__ : Optional[int] = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : str = min_resolution snake_case__ : Tuple = max_resolution snake_case__ : List[Any] = do_resize snake_case__ : Dict = size snake_case__ : List[str] = do_normalize snake_case__ : Optional[int] = image_mean snake_case__ : Optional[int] = image_std snake_case__ : Any = do_rescale snake_case__ : Optional[int] = rescale_factor snake_case__ : int = do_pad def _lowercase ( self : Dict ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ): if not batched: snake_case__ : List[str] = image_inputs[0] if isinstance(__A , 
Image.Image ): snake_case__, snake_case__ : Tuple = image.size else: snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2] if w < h: snake_case__ : Dict = int(self.size["shortest_edge"] * h / w ) snake_case__ : Optional[int] = self.size["shortest_edge"] elif w > h: snake_case__ : List[Any] = self.size["shortest_edge"] snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Dict = self.size["shortest_edge"] snake_case__ : Dict = self.size["shortest_edge"] else: snake_case__ : str = [] for image in image_inputs: snake_case__, snake_case__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0] snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = ConditionalDetrImageProcessor if is_vision_available() else None def _lowercase ( self : int ): snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self ) @property def _lowercase ( self : Any ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Any ): snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : List[str] ): snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Any = self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , 
pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : Union[str, Any] ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : 
Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ): # Initialize image_processing snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : List[Any] ): # prepare image and target snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Union[str, Any] = json.loads(f.read() ) snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" ) snake_case__ : int = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : str ): # prepare image, target and masks_path snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : int = json.loads(f.read() ) snake_case__ : Optional[int] = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" ) snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : str = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : str = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : List[Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
def SCREAMING_SNAKE_CASE(snake_case_: "Image.Image") -> "Image.Image":
    """Binarize a greyscale image in place, using its mean intensity as threshold.

    Every pixel strictly brighter than the mean becomes 255 (white); every other
    pixel becomes 0 (black).  The image is modified in place and also returned
    for convenience.

    :param snake_case_: a greyscale (mode "L") PIL image, or any object exposing
        a compatible ``size`` / ``load()`` pixel-access interface.
    :return: the same image object, thresholded.
    """
    width, height = snake_case_.size
    if width == 0 or height == 0:
        # Nothing to threshold; also avoids a ZeroDivisionError below.
        return snake_case_

    pixels = snake_case_.load()  # pixel-access object, indexed as pixels[x, y]

    # First pass: sum all pixel intensities, then take the integer mean.
    mean = 0
    for y in range(height):
        for x in range(width):
            mean += pixels[x, y]
    mean //= width * height

    # Second pass: binarize every pixel against the mean threshold.
    for y in range(height):
        for x in range(width):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return snake_case_


if __name__ == "__main__":
    # Pillow is only needed to run the demo, so the import lives here; this
    # keeps the thresholding function importable without Pillow installed.
    from PIL import Image

    image = SCREAMING_SNAKE_CASE(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
720
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets __lowerCamelCase : Optional[int] = """\ @inproceedings{pillutla-etal:mauve:neurips2021, title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers}, author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid}, booktitle = {NeurIPS}, year = {2021} } """ __lowerCamelCase : str = """\ MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure. MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences. For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021). This metrics is a wrapper around the official implementation of MAUVE: https://github.com/krishnap25/mauve """ __lowerCamelCase : str = """ Calculates MAUVE scores between two lists of generated text and reference text. Args: predictions: list of generated text to score. Each predictions should be a string with tokens separated by spaces. references: list of reference for each prediction. Each reference should be a string with tokens separated by spaces. Optional Args: num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. 
Default -1 kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9 kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5 kmeans_max_iter: maximum number of k-means iterations. Default 500 featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl']. device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU max_text_length: maximum number of tokens to consider. Default 1024 divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25 mauve_scaling_factor: \"c\" from the paper. Default 5. verbose: If True (default), print running time updates seed: random seed to initialize k-means cluster assignments. Returns: mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer, frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer, divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve, p_hist: a discrete distribution, which is a quantized version of the text distribution p_text, q_hist: same as above, but with q_text. 
Examples: >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest >>> import datasets >>> mauve = datasets.load_metric('mauve') >>> predictions = [\"hello there\", \"general kenobi\"] >>> references = [\"hello there\", \"general kenobi\"] >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP >>> print(out.mauve) # doctest: +SKIP 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): """simple docstring""" def _lowercase ( self : Dict ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[ "https://arxiv.org/abs/2102.01454", "https://github.com/krishnap25/mauve", ] , ) def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ): snake_case__ : List[Any] = compute_mauve( p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , ) return out
25
0
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True if *number* is prime.

    Uses the 6k +/- 1 optimisation: after handling the small cases, every prime
    greater than 3 has the form 6k - 1 or 6k + 1, so only those candidates up
    to sqrt(number) need to be tested as divisors.
    """
    if 1 < number < 4:
        # 2 and 3 are prime.
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not prime.
        return False
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the *nth* prime number (Project Euler problem 7).

    >>> solution(6)
    13
    """
    count = 0
    number = 1
    # Count 2 and 3 first so the main loop can step over even numbers.
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
721
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position __lowerCamelCase : Union[str, Any] = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip 
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip __lowerCamelCase : List[Any] = concatenate_datasets __lowerCamelCase : List[str] = DownloadConfig __lowerCamelCase : Union[str, Any] = DownloadManager __lowerCamelCase : str = DownloadMode __lowerCamelCase : Union[str, Any] = DownloadConfig __lowerCamelCase : List[str] = DownloadMode __lowerCamelCase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
25
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Without sentencepiece the slow tokenizer cannot be built; the fast
    # tokenizer still works, it just cannot be converted back to a slow one.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class SCREAMING_SNAKE_CASE__(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) BigBird tokenizer.

    Wraps a pre-built ``tokenizer.json`` and exposes the BigBird post-processing
    convention ``[CLS] A [SEP]`` / ``[CLS] A [SEP] B [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain-string special tokens in AddedToken so whitespace handling
        # around them is explicit.
        # NOTE(review): the lstrip/rstrip flags below are restored from the
        # upstream BigBird tokenizer; the obfuscated source lost these literal
        # arguments -- verify against upstream.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow vocabulary is only possible when the sentencepiece
        # model file is available.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return token-type ids: all 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocabulary file into *save_directory*."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
700
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes strictly below *limit* via a sieve of Eratosthenes.

    Even numbers other than 2 are never collected, so only multiples of odd
    candidates need to be crossed out.
    """
    if limit < 3:
        # No primes below 0, 1 or 2.
        return []

    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1000000) -> int:
    """Return the prime below *ceiling* that is the sum of the longest run of
    consecutive primes (Project Euler problem 50).

    >>> solution(1000)
    953
    """
    primes = prime_sieve(ceiling)
    prime_set = set(primes)  # O(1) membership tests
    length = 0
    largest = 0

    for i in range(len(primes)):
        # Maintain a running sum of primes[i:j]; runs shorter than the current
        # best can never improve the answer, so j starts at i + length.
        sol = sum(primes[i : i + length])
        for j in range(i + length, len(primes)):
            if sol >= ceiling:
                break
            if sol in prime_set:
                length = j - i
                largest = sol
            sol += primes[j]
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
25
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for a Decision Transformer model.

    Holds the RL-specific sizes (``state_dim``, ``act_dim``, ``max_ep_len``)
    alongside GPT-2-style transformer hyper-parameters (``n_layer``,
    ``n_head``, dropout rates, ...).
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        """Store every hyper-parameter on the config instance.

        NOTE(review): parameter names and their pairing with the defaults
        (state_dim=17, act_dim=4, ...) were restored from the upstream
        DecisionTransformer config; the obfuscated source had lost them --
        verify against upstream.
        """
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
701
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): """simple docstring""" def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} snake_case__ : Optional[Any] = parent snake_case__ : str = batch_size snake_case__ : Union[str, Any] = num_channels snake_case__ : Optional[Any] = min_resolution snake_case__ : List[str] = max_resolution snake_case__ : Tuple = do_resize snake_case__ : str = size snake_case__ : str = do_normalize snake_case__ : Optional[Any] = image_mean snake_case__ : List[str] = image_std snake_case__ : List[str] = do_rescale snake_case__ : Tuple = rescale_factor snake_case__ : Tuple = do_pad def _lowercase ( self : str ): return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ): if not batched: snake_case__ : List[Any] = image_inputs[0] if isinstance(__A , Image.Image ): 
snake_case__, snake_case__ : str = image.size else: snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2] if w < h: snake_case__ : Any = int(self.size["shortest_edge"] * h / w ) snake_case__ : Any = self.size["shortest_edge"] elif w > h: snake_case__ : Optional[int] = self.size["shortest_edge"] snake_case__ : Any = int(self.size["shortest_edge"] * w / h ) else: snake_case__ : Tuple = self.size["shortest_edge"] snake_case__ : int = self.size["shortest_edge"] else: snake_case__ : Any = [] for image in image_inputs: snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0] snake_case__ : int = max(__A , key=lambda __A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ): """simple docstring""" a_ = DeformableDetrImageProcessor if is_vision_available() else None def _lowercase ( self : str ): snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self ) @property def _lowercase ( self : List[Any] ): return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Tuple ): snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__A , "image_mean" ) ) self.assertTrue(hasattr(__A , "image_std" ) ) self.assertTrue(hasattr(__A , "do_normalize" ) ) self.assertTrue(hasattr(__A , "do_resize" ) ) self.assertTrue(hasattr(__A , "do_rescale" ) ) self.assertTrue(hasattr(__A , "do_pad" ) ) self.assertTrue(hasattr(__A , "size" ) ) def _lowercase ( self : Any ): snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , __A ) snake_case__ : Tuple = 
self.image_processing_class.from_dict( self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A ) self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} ) self.assertEqual(image_processor.do_pad , __A ) def _lowercase ( self : str ): pass def _lowercase ( self : List[str] ): # Initialize image_processing snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A ) for image in image_inputs: self.assertIsInstance(__A , Image.Image ) # Test not batched input snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A ) snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : int ): # Initialize image_processing snake_case__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A ) for image in image_inputs: self.assertIsInstance(__A , np.ndarray ) # Test not batched input snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, 
expected_height, expected_width) , ) # Test batched snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Union[str, Any] ): # Initialize image_processing snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A ) for image in image_inputs: self.assertIsInstance(__A , torch.Tensor ) # Test not batched input snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowercase ( self : Optional[Any] ): # prepare image and target snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: snake_case__ : Tuple = json.loads(f.read() ) snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target} # encode them snake_case__ : str = DeformableDetrImageProcessor() snake_case__ : Tuple = image_processing(images=__A , annotations=__A , 
return_tensors="pt" ) # verify pixel values snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Union[str, Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : Any = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify orig_size snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) ) @slow def _lowercase ( self : Optional[int] ): # prepare image, target and masks_path snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: snake_case__ : Any = json.loads(f.read() ) snake_case__ : Dict = {"file_name": 
"000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target} snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" ) snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" ) # verify pixel values snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , __A ) snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) ) # verify area snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) ) # verify boxes snake_case__ : Any = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , __A ) snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) ) # verify image_id snake_case__ : List[str] = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) ) # verify is_crowd snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) ) # verify class_labels snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) ) # verify masks snake_case__ : Union[str, Any] = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A ) # verify orig_size snake_case__ : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) ) # verify size snake_case__ : Union[str, Any] = 
torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
25
0
import mpmath # for roots of unity import numpy as np class SCREAMING_SNAKE_CASE__ : """simple docstring""" def __init__( self : Optional[int] , __A : int=None , __A : List[Any]=None ): # Input as list snake_case__ : Optional[int] = list(poly_a or [0] )[:] snake_case__ : Optional[int] = list(poly_b or [0] )[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() snake_case__ : Optional[int] = len(self.polyA ) while self.polyB[-1] == 0: self.polyB.pop() snake_case__ : Dict = len(self.polyB ) # Add 0 to make lengths equal a power of 2 snake_case__ : Optional[Any] = int( 2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) ) while len(self.polyA ) < self.c_max_length: self.polyA.append(0 ) while len(self.polyB ) < self.c_max_length: self.polyB.append(0 ) # A complex root used for the fourier transform snake_case__ : List[str] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) ) # The product snake_case__ : List[Any] = self.__multiply() def _lowercase ( self : str , __A : str ): snake_case__ : List[Any] = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB] # Corner case if len(__A ) <= 1: return dft[0] # snake_case__ : Union[str, Any] = self.c_max_length // 2 while next_ncol > 0: snake_case__ : Dict = [[] for i in range(__A )] snake_case__ : Optional[Any] = self.root**next_ncol # First half of next step snake_case__ : Any = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__A ): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] ) current_root *= root # Second half of next step snake_case__ : str = 1 for j in range(self.c_max_length // (next_ncol * 2) ): for i in range(__A ): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] ) current_root *= root # Update snake_case__ : List[Any] = new_dft snake_case__ : Dict = next_ncol // 2 return dft[0] def _lowercase ( self : Optional[Any] ): snake_case__ : Tuple = self.__dft("A" ) snake_case__ : int = 
self.__dft("B" ) snake_case__ : Optional[Any] = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]] del dft_a del dft_b # Corner Case if len(inverce_c[0] ) <= 1: return inverce_c[0] # Inverse DFT snake_case__ : str = 2 while next_ncol <= self.c_max_length: snake_case__ : List[Any] = [[] for i in range(__A )] snake_case__ : Any = self.root ** (next_ncol // 2) snake_case__ : Tuple = 1 # First half of next step for j in range(self.c_max_length // next_ncol ): for i in range(next_ncol // 2 ): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2 ) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root) ) current_root *= root # Update snake_case__ : Dict = new_inverse_c next_ncol *= 2 # Unpack snake_case__ : List[str] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1j for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self : List[Any] ): snake_case__ : Union[str, Any] = "A = " + " + ".join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) ) snake_case__ : Optional[Any] = "B = " + " + ".join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) ) snake_case__ : Tuple = "A*B = " + " + ".join( f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) ) return f'''{a}\n{b}\n{c}''' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
702
import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging __lowerCamelCase : List[str] = logging.get_logger(__name__) __lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""} # See all LED models at https://huggingface.co/models?filter=LED __lowerCamelCase : Tuple = { """vocab_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""", }, """merges_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""", }, """tokenizer_file""": { """allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""", }, } __lowerCamelCase : Dict = { """allenai/led-base-16384""": 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def SCREAMING_SNAKE_CASE ( ): snake_case__ : Optional[int] = ( list(range(ord("!" 
) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) ) ) snake_case__ : Optional[int] = bs[:] snake_case__ : Any = 0 for b in range(2**8 ): if b not in bs: bs.append(snake_case_ ) cs.append(2**8 + n ) n += 1 snake_case__ : Dict = [chr(snake_case_ ) for n in cs] return dict(zip(snake_case_ , snake_case_ ) ) def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ): snake_case__ : Dict = set() snake_case__ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) snake_case__ : List[Any] = char return pairs class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ): """simple docstring""" a_ = VOCAB_FILES_NAMES a_ = PRETRAINED_VOCAB_FILES_MAP a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a_ = ["input_ids", "attention_mask"] def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ): snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , ) with open(__A , encoding="utf-8" ) as vocab_handle: snake_case__ : Any = json.load(__A ) snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()} snake_case__ : Union[str, Any] = errors # how to handle errors in decoding snake_case__ : Any = bytes_to_unicode() snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()} with open(__A , encoding="utf-8" ) as merges_handle: snake_case__ : str = merges_handle.read().split("\n" )[1:-1] snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges] snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) ) snake_case__ : Optional[int] = {} snake_case__ : Any = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def _lowercase ( self : List[Any] ): return len(self.encoder ) def _lowercase ( self : Any ): return dict(self.encoder , **self.added_tokens_encoder ) def _lowercase ( self : Optional[Any] , __A : Optional[int] ): if token in self.cache: return self.cache[token] snake_case__ : Union[str, Any] = tuple(__A ) snake_case__ : List[Any] = get_pairs(__A ) if not pairs: return token while True: snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) ) if bigram not in self.bpe_ranks: break snake_case__, snake_case__ : Dict = bigram snake_case__ : str = [] snake_case__ : Union[str, Any] = 0 while i < len(__A ): try: snake_case__ : Dict = word.index(__A , __A ) except 
ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) snake_case__ : str = j if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 snake_case__ : str = tuple(__A ) snake_case__ : int = new_word if len(__A ) == 1: break else: snake_case__ : List[str] = get_pairs(__A ) snake_case__ : List[Any] = " ".join(__A ) snake_case__ : Optional[int] = word return word def _lowercase ( self : Optional[Any] , __A : Optional[Any] ): snake_case__ : List[str] = [] for token in re.findall(self.pat , __A ): snake_case__ : Dict = "".join( self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) ) return bpe_tokens def _lowercase ( self : Union[str, Any] , __A : Optional[int] ): return self.encoder.get(__A , self.encoder.get(self.unk_token ) ) def _lowercase ( self : Optional[int] , __A : Optional[Any] ): return self.decoder.get(__A ) def _lowercase ( self : Union[str, Any] , __A : Dict ): snake_case__ : Optional[Any] = "".join(__A ) snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors ) return text def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ): if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return snake_case__ : List[Any] = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) snake_case__ : str = os.path.join( __A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(__A , "w" , encoding="utf-8" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" ) snake_case__ : str = 0 with open(__A , "w" , encoding="utf-8" ) as 
writer: writer.write("#version: 0.2\n" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' " Please check that the tokenizer is not corrupted!" ) snake_case__ : int = token_index writer.write(" ".join(__A ) + "\n" ) index += 1 return vocab_file, merge_file def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ): if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case__ : Tuple = [self.cls_token_id] snake_case__ : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A ) if token_ids_a is None: return [1] + ([0] * len(__A )) + [1] return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1] def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ): snake_case__ : Any = [self.sep_token_id] snake_case__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ): snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()): snake_case__ : Optional[int] = " " + text return (text, kwargs) def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ): 
snake_case__ : Optional[Any] = super()._pad( encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , ) # Load from model defaults if return_attention_mask is None: snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A ) if needs_to_be_padded: snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` snake_case__ : int = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": snake_case__ : Tuple = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side ) ) return encoded_inputs
25
0
def SCREAMING_SNAKE_CASE ( snake_case_ : list ): if len(snake_case_ ) <= 1: return lst snake_case__ : List[Any] = 1 while i < len(snake_case_ ): if lst[i - 1] <= lst[i]: i += 1 else: snake_case__ : Tuple = lst[i], lst[i - 1] i -= 1 if i == 0: snake_case__ : Union[str, Any] = 1 return lst if __name__ == "__main__": __lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip() __lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")] print(gnome_sort(unsorted))
703
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src""")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="""ignore""", category=FutureWarning) def SCREAMING_SNAKE_CASE ( snake_case_ : str ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(snake_case_ ) def SCREAMING_SNAKE_CASE ( snake_case_ : Any ): from diffusers.utils.testing_utils import pytest_terminal_summary_main snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
25
0
import math def SCREAMING_SNAKE_CASE ( snake_case_ : int = 100 ): snake_case__ : Tuple = sum(i * i for i in range(1 , n + 1 ) ) snake_case__ : Optional[int] = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(f"{solution() = }")
704
def SCREAMING_SNAKE_CASE ( snake_case_ : str ): snake_case__ : Any = [0] * len(snake_case_ ) for i in range(1 , len(snake_case_ ) ): # use last results for better performance - dynamic programming snake_case__ : Union[str, Any] = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: snake_case__ : str = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 snake_case__ : int = j return prefix_result def SCREAMING_SNAKE_CASE ( snake_case_ : str ): return max(prefix_function(snake_case_ ) ) if __name__ == "__main__": import doctest doctest.testmod()
25
0