code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
import math import unittest def UpperCamelCase ( lowerCAmelCase__ ): '''simple docstring''' assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and ( number >= 0 ), "'number' must been an int and positive" if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True class lowercase ( unittest.TestCase ): def A__ ( self): self.assertTrue(is_prime(2)) self.assertTrue(is_prime(3)) self.assertTrue(is_prime(5)) self.assertTrue(is_prime(7)) self.assertTrue(is_prime(1_1)) self.assertTrue(is_prime(1_3)) self.assertTrue(is_prime(1_7)) self.assertTrue(is_prime(1_9)) self.assertTrue(is_prime(2_3)) self.assertTrue(is_prime(2_9)) def A__ ( self): with self.assertRaises(A__): is_prime(-1_9) self.assertFalse( is_prime(0) ,'''Zero doesn\'t have any positive factors, primes must have exactly two.''' ,) self.assertFalse( is_prime(1) ,'''One only has 1 positive factor, primes must have exactly two.''' ,) self.assertFalse(is_prime(2 * 2)) self.assertFalse(is_prime(2 * 3)) self.assertFalse(is_prime(3 * 3)) self.assertFalse(is_prime(3 * 5)) self.assertFalse(is_prime(3 * 5 * 7)) if __name__ == "__main__": unittest.main()
101
import functools def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: UpperCamelCase : Optional[int] = len(_lowerCAmelCase ) UpperCamelCase : List[str] = len(_lowerCAmelCase ) @functools.cache def min_distance(_lowerCAmelCase , _lowerCAmelCase ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCamelCase : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , _lowerCAmelCase ) , 1 + min_distance(_lowerCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
52
0
"""simple docstring""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) SCREAMING_SNAKE_CASE : Tuple = { """microsoft/xprophetnet-large-wiki100-cased""": ( """https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json""" ), } class _UpperCAmelCase ( __snake_case ): '''simple docstring''' lowerCamelCase__ ='xlm-prophetnet' lowerCamelCase__ =['past_key_values'] lowerCamelCase__ ={ 'num_attention_heads': 'num_encoder_attention_heads', } def __init__(self , a_ = 0.1 , a_ = "gelu" , a_ = 3_05_22 , a_ = 10_24 , a_ = 40_96 , a_ = 12 , a_ = 16 , a_ = 40_96 , a_ = 12 , a_ = 16 , a_ = 0.1 , a_ = 0.1 , a_ = 5_12 , a_ = 0.02 , a_ = True , a_ = True , a_ = 0 , a_ = 2 , a_ = 32 , a_ = 1_28 , a_ = False , a_ = 0.0 , a_ = True , a_ = 0 , a_ = 1 , a_ = 2 , **a_ , ): '''simple docstring''' __snake_case : str = vocab_size __snake_case : Optional[int] = hidden_size __snake_case : List[str] = encoder_ffn_dim __snake_case : List[str] = num_encoder_layers __snake_case : Dict = num_encoder_attention_heads __snake_case : Dict = decoder_ffn_dim __snake_case : List[Any] = num_decoder_layers __snake_case : Optional[int] = num_decoder_attention_heads __snake_case : List[str] = max_position_embeddings __snake_case : Any = init_std # Normal(0, this parameter) __snake_case : str = activation_function # parameters for xlmprophetnet __snake_case : Union[str, Any] = ngram __snake_case : Union[str, Any] = num_buckets __snake_case : Optional[int] = relative_max_distance __snake_case : List[Any] = disable_ngram_loss __snake_case : int = eps # 3 Types of Dropout __snake_case : List[str] = attention_dropout __snake_case : Union[str, Any] = activation_dropout __snake_case : Optional[int] = dropout __snake_case : List[str] = use_cache super().__init__( pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , is_encoder_decoder=a_ , 
add_cross_attention=a_ , decoder_start_token_id=a_ , **a_ , ) @property def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def SCREAMING_SNAKE_CASE (self , a_ ): '''simple docstring''' raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and''' ''' `num_decoder_layers`.''' )
102
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __lowerCamelCase : str = random.Random() if is_torch_available(): import torch def A_ ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: if rng is None: UpperCamelCase : Optional[int] = global_rng UpperCamelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ): '''simple docstring''' UpperCamelCase : Tuple = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : List[Any] = min_seq_length UpperCamelCase : List[str] = max_seq_length UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Union[str, Any] = feature_size UpperCamelCase : List[str] = padding_value UpperCamelCase : Optional[Any] = sampling_rate UpperCamelCase : List[str] = return_attention_mask UpperCamelCase : List[Any] = do_normalize def __UpperCamelCase( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __UpperCamelCase( self , A_=False , A_=False ): '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Dict = [ _flatten(floats_list((x, 
self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = ASTFeatureExtractor def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = ASTFeatureExtractionTester(self ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : int = np.asarray(A_ ) UpperCamelCase : Any = feat_extract(A_ , return_tensors="np" ).input_values UpperCamelCase : List[str] = feat_extract(A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : int = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : str = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __UpperCamelCase( self , A_ ): '''simple docstring''' from datasets import load_dataset UpperCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCamelCase : Any = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on UpperCamelCase : List[Any] = self._load_datasamples(1 ) UpperCamelCase : Tuple = ASTFeatureExtractor() UpperCamelCase : str = feature_extractor(A_ , 
return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
52
0
def UpperCamelCase( __UpperCamelCase : int ): return number & 1 == 0 if __name__ == "__main__": import doctest doctest.testmod()
103
import pickle import numpy as np from matplotlib import pyplot as plt class A__ : def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ): '''simple docstring''' UpperCamelCase : int = bp_numa UpperCamelCase : int = bp_numa UpperCamelCase : List[Any] = bp_numa UpperCamelCase : Optional[int] = conva_get[:2] UpperCamelCase : Optional[Any] = conva_get[2] UpperCamelCase : Dict = size_pa UpperCamelCase : Union[str, Any] = rate_w UpperCamelCase : Dict = rate_t UpperCamelCase : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1 UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1 UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1 def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(A_ , "wb" ) as f: pickle.dump(A_ , A_ ) print(F"""Model saved: {save_path}""" ) @classmethod def __UpperCamelCase( cls , A_ ): '''simple docstring''' with open(A_ , "rb" ) as f: UpperCamelCase : Optional[Any] = pickle.load(A_ ) # noqa: S301 UpperCamelCase : List[Any] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" ) UpperCamelCase : List[Any] = model_dic.get("num_bp1" ) UpperCamelCase : Dict = model_dic.get("num_bp2" ) UpperCamelCase : Dict = 
model_dic.get("num_bp3" ) UpperCamelCase : Dict = model_dic.get("rate_weight" ) UpperCamelCase : str = model_dic.get("rate_thre" ) # create model instance UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # modify model parameter UpperCamelCase : str = model_dic.get("w_conv1" ) UpperCamelCase : Optional[Any] = model_dic.get("wkj" ) UpperCamelCase : int = model_dic.get("vji" ) UpperCamelCase : Any = model_dic.get("thre_conv1" ) UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" ) UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" ) return conv_ins def __UpperCamelCase( self , A_ ): '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def __UpperCamelCase( self , A_ ): '''simple docstring''' return round(A_ , 3 ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = convs[0] UpperCamelCase : Optional[Any] = convs[1] UpperCamelCase : Optional[Any] = np.shape(A_ )[0] # get the data slice of original image data, data_focus UpperCamelCase : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , A_ ): for j_focus in range(0 , size_data - size_conv + 1 , A_ ): UpperCamelCase : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): UpperCamelCase : str = [] for i_focus in range(len(A_ ) ): UpperCamelCase : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape( A_ , A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion UpperCamelCase : List[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) UpperCamelCase : Tuple = np.asarray(A_ ) return 
focus_list, data_featuremap def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ): '''simple docstring''' UpperCamelCase : Any = len(featuremaps[0] ) UpperCamelCase : str = int(size_map / size_pooling ) UpperCamelCase : Optional[int] = [] for i_map in range(len(A_ ) ): UpperCamelCase : Tuple = featuremaps[i_map] UpperCamelCase : Any = [] for i_focus in range(0 , A_ , A_ ): for j_focus in range(0 , A_ , A_ ): UpperCamelCase : int = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = [] for i in range(len(A_ ) ): UpperCamelCase : List[Any] = np.shape(data[i] ) UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] ) UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) UpperCamelCase : Any = np.asarray(A_ ) return data_expanded def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = np.asarray(A_ ) UpperCamelCase : List[Any] = np.shape(A_ ) UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = [] UpperCamelCase : Optional[int] = 0 for i_map in range(A_ ): UpperCamelCase : int = np.ones((size_map, size_map) ) for i in range(0 , A_ , A_ ): for j in range(0 , A_ , A_ ): UpperCamelCase : str = pd_pool[ i_pool ] UpperCamelCase : str = i_pool + 1 UpperCamelCase : str = np.multiply( A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ): '''simple 
docstring''' print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A_ )) ) print((" - - Shape: Teach_Data ", np.shape(A_ )) ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = [] UpperCamelCase : int = 1_0000 while rp < n_repeat and mse >= error_accuracy: UpperCamelCase : Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCamelCase : Any = np.asmatrix(datas_train[p] ) UpperCamelCase : List[str] = np.asarray(datas_teach[p] ) UpperCamelCase , UpperCamelCase : Dict = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : int = np.shape(A_ ) UpperCamelCase : List[str] = self._expand(A_ ) UpperCamelCase : Optional[int] = data_bp_input UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - self.thre_bpa UpperCamelCase : Dict = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCamelCase : List[Any] = np.multiply( (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : str = np.multiply( np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : Any = np.dot(A_ , self.vji ) UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist() UpperCamelCase : List[Any] = self._calculate_gradient_from_pool( A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] ) UpperCamelCase : List[Any] = 
self.rate_weight * np.dot(A_ , A_ ) UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCamelCase : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCamelCase : Any = rp + 1 UpperCamelCase : Union[str, Any] = error_count / patterns all_mse.append(A_ ) def draw_error(): UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ , "+-" ) plt.plot(A_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A_ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A_ )) ) for p in range(len(A_ ) ): UpperCamelCase : int = np.asmatrix(datas_test[p] ) UpperCamelCase , UpperCamelCase : Any = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : Dict = self._expand(A_ ) UpperCamelCase : List[Any] = data_bp_input UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa UpperCamelCase : List[Any] = self.sig(A_ ) UpperCamelCase : int = bp_outa * 
self.wkj.T - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out] return np.asarray(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = np.asmatrix(A_ ) UpperCamelCase , UpperCamelCase : List[Any] = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : str = self.pooling(A_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
52
0
'''simple docstring''' import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class lowercase_ : """simple docstring""" @property def SCREAMING_SNAKE_CASE ( self : List[Any] ): return self.get_dummy_input() @property def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): if self.block_type == "down": return (4, 3_2, 1_6, 1_6) elif self.block_type == "mid": return (4, 3_2, 3_2, 3_2) elif self.block_type == "up": return (4, 3_2, 6_4, 6_4) raise ValueError(F"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ,lowercase__ : int=True ,lowercase__ : Dict=False ,lowercase__ : Union[str, Any]=False ,lowercase__ : Tuple=False ,): __lowercase = 4 __lowercase = 3_2 __lowercase = (3_2, 3_2) __lowercase = torch.manual_seed(0 ) __lowercase = torch.device(lowercase__ ) __lowercase = (batch_size, num_channels) + sizes __lowercase = randn_tensor(lowercase__ ,generator=lowercase__ ,device=lowercase__ ) __lowercase = {'''hidden_states''': hidden_states} if include_temb: __lowercase = 1_2_8 __lowercase = randn_tensor((batch_size, temb_channels) ,generator=lowercase__ ,device=lowercase__ ) if include_res_hidden_states_tuple: __lowercase = torch.manual_seed(1 ) __lowercase = (randn_tensor(lowercase__ ,generator=lowercase__ ,device=lowercase__ ),) if include_encoder_hidden_states: __lowercase = floats_tensor((batch_size, 3_2, 3_2) ).to(lowercase__ ) if include_skip_sample: __lowercase = randn_tensor(((batch_size, 3) + sizes) ,generator=lowercase__ ,device=lowercase__ ) return dummy_input def SCREAMING_SNAKE_CASE ( self : Any ): __lowercase = { '''in_channels''': 3_2, '''out_channels''': 3_2, '''temb_channels''': 1_2_8, } if self.block_type == "up": __lowercase = 3_2 if self.block_type == "mid": init_dict.pop('''out_channels''' ) __lowercase = 
self.dummy_input return init_dict, inputs_dict def SCREAMING_SNAKE_CASE ( self : Tuple ,lowercase__ : Tuple ): __lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common() __lowercase = self.block_class(**lowercase__ ) unet_block.to(lowercase__ ) unet_block.eval() with torch.no_grad(): __lowercase = unet_block(**lowercase__ ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = output[0] self.assertEqual(output.shape ,self.output_shape ) __lowercase = output[0, -1, -3:, -3:] __lowercase = torch.tensor(lowercase__ ).to(lowercase__ ) assert torch_all_close(output_slice.flatten() ,lowercase__ ,atol=5e-3 ) @unittest.skipIf(torch_device == '''mps''' ,'''Training is not supported in mps''' ) def SCREAMING_SNAKE_CASE ( self : Dict ): __lowercase , __lowercase = self.prepare_init_args_and_inputs_for_common() __lowercase = self.block_class(**lowercase__ ) model.to(lowercase__ ) model.train() __lowercase = model(**lowercase__ ) if isinstance(lowercase__ ,lowercase__ ): __lowercase = output[0] __lowercase = torch.device(lowercase__ ) __lowercase = randn_tensor(output.shape ,device=lowercase__ ) __lowercase = torch.nn.functional.mse_loss(lowercase__ ,lowercase__ ) loss.backward()
104
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Any = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class A__ ( __snake_case ): _UpperCAmelCase :Dict = 'bart' _UpperCAmelCase :str = ['past_key_values'] _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ): '''simple docstring''' UpperCamelCase : int = vocab_size UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Any = d_model UpperCamelCase : Optional[Any] = encoder_ffn_dim UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = encoder_attention_heads UpperCamelCase : Optional[int] = decoder_ffn_dim UpperCamelCase : List[str] = decoder_layers UpperCamelCase : Optional[int] = decoder_attention_heads UpperCamelCase : int = dropout UpperCamelCase : int = attention_dropout UpperCamelCase : Tuple = activation_dropout UpperCamelCase : Tuple = activation_function UpperCamelCase : int = init_std UpperCamelCase : List[Any] = encoder_layerdrop UpperCamelCase : List[str] = decoder_layerdrop UpperCamelCase : Dict = classifier_dropout UpperCamelCase : Optional[int] = use_cache UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = scale_embedding # 
scale factor will be sqrt(d_model) if True super().__init__( num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ): UpperCamelCase : int = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." ) class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase : List[str] = {0: "batch"} UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(A_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCamelCase : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers for i in range(A_ ): UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} else: UpperCamelCase : Optional[Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Tuple = super().outputs else: UpperCamelCase : Dict = super(A_ , self ).outputs if self.use_past: UpperCamelCase , UpperCamelCase : int = self.num_layers for i in range(A_ ): UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) # Generate decoder inputs UpperCamelCase : List[Any] = seq_length if not self.use_past else 1 UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} UpperCamelCase : List[Any] = dict(**A_ , **A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1] UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads UpperCamelCase : int = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : List[Any] = decoder_seq_length + 3 UpperCamelCase : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCamelCase : int = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 ) UpperCamelCase : int = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers UpperCamelCase : Any = min(A_ , A_ ) UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(A_ ): common_inputs["past_key_values"].append( ( torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), ) ) # TODO: test this. UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(A_ , A_ ): common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCamelCase : Optional[Any] = seqlen + 2 UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads UpperCamelCase : str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype UpperCamelCase : int = torch.cat( [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) UpperCamelCase : Optional[Any] = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ ) ] return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ ) UpperCamelCase : int = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) elif self.task == "causal-lm": UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm( A_ , batch_size=A_ 
, seq_length=A_ , is_pair=A_ , framework=A_ ) else: UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) return common_inputs def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ ) else: UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_( A_ , A_ , A_ , A_ )
52
0
"""Tests for the fast Bloom tokenizer (BloomTokenizerFast)."""

import unittest

from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # Bloom only ships a fast (Rust) tokenizer; there is no slow counterpart.
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        """Download the reference tokenizer once and save it to the temp dir used by the mixin."""
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        """Reload the tokenizer saved in setUp, forcing the special-token map."""
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        """Round-trip a pair of known sentences through encode/decode and pin the token ids."""
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2175, 23714, 73173, 144252, 2], [77, 132619, 3478, 368, 109586, 35433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        """Encoding without a pad token must work until padding is actually requested."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: truncation-only calls must not raise.
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)
                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None

                # With no pad token, padding="max_length" must raise for every entry point.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        """Encode/decode multilingual XNLI premises and check the round trip is lossless."""
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
        # any sequence length constraints. The parent-class version relies on the maximum sequence
        # length of the positional embeddings and would fail here.
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
105
"""Elementary number-theory helpers: primality, sieves, factorization, gcd/lcm, etc.

All functions validate their arguments with assertions, matching the original
module's contract-checking style.
"""
from math import sqrt


def is_prime(number):
    """Return True iff *number* is prime (trial division up to sqrt(number))."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n):
    """Return the primes from 2 to n (inclusive) via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    # actual sieve of erathostenes: zero out every multiple of a surviving value
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n):
    """Return the primes from 2 to n (inclusive) by direct primality testing."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number):
    """Return the prime factorization of *number* as a list (e.g. 12 -> [2, 2, 3])."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number in (0, 1):
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient //= factor  # integer division keeps quotient an int
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number):
    """Return the largest prime factor of *number*."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number):
    """Return the smallest prime factor of *number*."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number):
    """Return True iff *number* is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number):
    """Return True iff *number* is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number):
    """Return two primes whose sum is the even *number* > 2 (Goldbach decomposition)."""
    assert isinstance(number, int) and (number > 2) and is_even(number), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    loop = True  # exit variable. for break up the loops
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(numbera, numberb):
    """Return the greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(numbera, int) and isinstance(numberb, int) and (numbera >= 0) and (numberb >= 0)
    ), "'number1' and 'number2' must been positive integer."
    while numberb != 0:
        rest = numbera % numberb
        numbera = numberb
        numberb = rest
    # precondition
    assert isinstance(numbera, int) and (numbera >= 0), "'number' must been from type int and positive"
    return numbera


def kg_v(numbera, numberb):
    """Return the least common multiple of two positive integers."""
    assert (
        isinstance(numbera, int) and isinstance(numberb, int) and (numbera >= 1) and (numberb >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if numbera > 1 and numberb > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_a = prime_factorization(numbera)
        prime_fac_b = prime_factorization(numberb)
    elif numbera == 1 or numberb == 1:
        prime_fac_a = []
        prime_fac_b = []
        ans = max(numbera, numberb)
    done = []  # captured numbers in both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_a:
        if n not in done:
            if n in prime_fac_b:
                count_a = prime_fac_a.count(n)
                count_b = prime_fac_b.count(n)
                for _ in range(max(count_a, count_b)):
                    ans *= n
            else:
                count_a = prime_fac_a.count(n)
                for _ in range(count_a):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_b:
        if n not in done:
            count_b = prime_fac_b.count(n)
            for _ in range(count_b):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n):
    """Return the n-th prime number, 0-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_a, p_number_b):
    """Return the primes strictly between two primes p_number_a < p_number_b."""
    assert (
        is_prime(p_number_a) and is_prime(p_number_b) and (p_number_a < p_number_b)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_a + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_b:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list) and ans[0] != p_number_a and ans[len(ans) - 1] != p_number_b
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n):
    """Return all positive divisors of n, including 1 and n itself."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number):
    """Return True iff *number* equals the sum of its proper divisors (e.g. 6, 28)."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list) and (divisors[0] == 1) and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator, denominator):
    """Return (numerator, denominator) reduced to lowest terms."""
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n):
    """Return n! computed iteratively."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n):
    """Return the n-th Fibonacci number with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    tmp = 0
    fiba = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fiba
        fiba = tmp
    return ans
52
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase : int = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : str = ['''MaskFormerFeatureExtractor'''] __UpperCamelCase : Dict = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : Tuple = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] __UpperCamelCase : Optional[Any] = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys __UpperCamelCase : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
106
"""Checks that every model config class mentions a valid checkpoint in its docstring."""
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are deliberately exempt from the checkpoint-docstring rule.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose markdown link matches its hub URL, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every (non-exempt) config class without a valid checkpoint."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
52
0
"""Nightly GPU integration tests for the VersatileDiffusion text-to-image pipeline."""
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device


# Disable TF32 matmuls so results are bit-reproducible across Ampere+ GPUs.
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    # Placeholder: no fast (CPU) tests exist for this pipeline yet.
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        """Saving after remove_unused_weights() and reloading must not change outputs."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_versatile_diffusion_text2img_pipeline(self):
        """Pin a slice of the fp16 output against reference values."""
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
107
from __future__ import annotations from functools import lru_cache from math import ceil __lowerCamelCase : str = 100 __lowerCamelCase : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) __lowerCamelCase : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A_ ( _lowerCAmelCase ) -> set[int]: if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} UpperCamelCase : set[int] = set() UpperCamelCase : int UpperCamelCase : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A_ ( _lowerCAmelCase = 5000 ) -> int | None: for number_to_partition in range(1 , _lowerCAmelCase ): if len(partition(_lowerCAmelCase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f"""{solution() = }""")
52
0
"""simple docstring""" from __future__ import annotations def a__ ( SCREAMING_SNAKE_CASE : list[int] ): '''simple docstring''' return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE ) if __name__ == "__main__": import doctest doctest.testmod()
108
def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : Optional[int] = int(_lowerCAmelCase ) if decimal in (0, 1): # Exit cases for the recursion return str(_lowerCAmelCase ) UpperCamelCase , UpperCamelCase : Dict = divmod(_lowerCAmelCase , 2 ) return binary_recursive(_lowerCAmelCase ) + str(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = str(_lowerCAmelCase ).strip() if not number: raise ValueError("No input value was provided" ) UpperCamelCase : Optional[int] = "-" if number.startswith("-" ) else "" UpperCamelCase : Any = number.lstrip("-" ) if not number.isnumeric(): raise ValueError("Input value is not an integer" ) return F"""{negative}0b{binary_recursive(int(_lowerCAmelCase ) )}""" if __name__ == "__main__": from doctest import testmod testmod()
52
0
"""Conditional DETR model configuration."""
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    """Configuration for a Conditional DETR model.

    Defaults reproduce the microsoft/conditional-detr-resnet-50 architecture.
    """

    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map generic config attribute names onto DETR-specific ones.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        # The timm backbone and a HF backbone config are mutually exclusive.
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Rehydrate a serialized backbone config into its concrete config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        """Serialize to a dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    """ONNX export configuration for Conditional DETR."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Dynamic axes for the image inputs.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
109
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    """Builds a tiny LiltConfig plus matching random inputs for the tests below.

    NOTE(review): the collapsed original declared every __init__ parameter as
    ``A_`` (a SyntaxError) and named this class ``A__`` while the test class
    constructed ``LiltModelTester``; names are restored from the attribute
    assignments in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        """Return a config and random input tensors with legal bounding boxes."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that every bbox is legal: coordinate 1 <= 3 and 0 <= 2.
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported calling conventions.
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE(review): the two anonymous boolean flags in the collapsed original are
    # restored as fx_compatible / test_pruning — confirm against upstream.
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # All pipeline tests are skipped for this model (original returned True unconditionally).
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )

        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(
            torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3)
        )
52
0
def find_minimum_change(denominations, value):
    """Greedily break ``value`` into the fewest coins drawn from ``denominations``.

    Args:
        denominations: iterable of positive integer coin values, in any order.
        value: amount to break down (int, or a numeric string).

    Returns:
        List of the denominations used, largest first. Empty for a
        non-positive value or empty ``denominations``.

    Fixes vs. original: the function was defined as ``_a`` with two parameters
    both named ``SCREAMING_SNAKE_CASE`` (a SyntaxError) while the driver called
    ``find_minimum_change``; the greedy also silently assumed the caller passed
    denominations in ascending order — we now sort explicitly, so unsorted
    user-entered denominations work too (backward compatible for sorted input).
    """
    total_value = int(value)
    answer = []
    # Largest coins first; sorting makes the greedy independent of input order.
    for denomination in sorted((int(d) for d in denominations), reverse=True):
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer


# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) <= 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
110
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss


# Mark every test in this module as an integration test. The collapsed original
# bound this to a throwaway name; pytest only honours the name `pytestmark`.
pytestmark = pytest.mark.integration


@require_faiss
class IndexableDatasetTest(TestCase):
    # NOTE(review): the collapsed original named every method `__UpperCamelCase`
    # (so only the last survived) and used duplicate `A_` lambda parameters
    # (a SyntaxError); distinct names restored below.

    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        # MissingIndex is the restored exception target (it is imported above).
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def test_serialization_fsspec(mockfs):
    # NOTE(review): the original parameter was renamed to `_lowerCAmelCase`
    # while the body referenced `mockfs` (a pytest fixture) — restored.
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
52
0
"""Lazy public API for the CLIPSeg model (configs, processor and, when torch
is available, the modeling classes)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map submodule name -> public names to expose lazily.
# NOTE: the original reassigned one throwaway variable for the dict, the
# modeling list AND the lazy module (clobbering the import structure), and
# then passed the undefined name `_import_structure` to _LazyModule.
_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply do not expose the modeling classes.
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
251
def solution(length: int = 50) -> int:
    """Project Euler 116: count the ways to replace at least one black 1-unit
    tile in a row of ``length`` with red (2), green (3) or blue (4) tiles,
    using only one colour per arrangement.

    Fixes vs. original: the function was named ``A_`` but called as
    ``solution()``, and its parameter was renamed away from ``length`` while
    the body still used ``length`` — both NameErrors.

    >>> solution(5)
    12
    """
    # ways[n][c] = number of arrangements of a row of n units using at least
    # one coloured tile of length c + 2 (c = 0, 1, 2 for red, green, blue).
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                # Place the first coloured tile at tile_start; the remainder of
                # the row (after it) contributes its own count, +1 for leaving
                # the remainder all black.
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])


if __name__ == "__main__":
    print(f"""{solution() = }""")
52
0
"""Lazy public API for the MRA model."""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# NOTE: the original bound the dict, the modeling list and the lazy module to
# one reused throwaway name (clobbering the import structure) and then passed
# the undefined name `_import_structure` to _LazyModule — restored below.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
265
def remove_duplicates(key: str) -> str:
    """Return ``key`` with spaces kept and only the first occurrence of each
    alphabetic character retained (case-sensitive).

    NOTE: all four functions in the original were defined under the single name
    ``A_`` (each shadowing the previous) while their call-sites used the names
    restored here — the module could never run.
    """
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a mixed-keyword substitution alphabet: the deduplicated keyword
    first, then the remaining letters of the alphabet shifted past it."""
    alphabet = [chr(i + 65) for i in range(26)]

    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)

    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}

    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher ``message`` with ``cipher_map``; unmapped characters pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher ``message`` by inverting ``cipher_map``."""
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive entry point: prompt for a message, keyword and direction."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
52
0
"""simple docstring""" from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging UpperCAmelCase = logging.get_logger(__name__) class UpperCAmelCase_ ( __snake_case): snake_case__ = ['pixel_values'] def __init__( self : str , __UpperCamelCase : str = True , __UpperCamelCase : List[str] = 1 / 255 , __UpperCamelCase : Optional[int] = True , __UpperCamelCase : Tuple = 8 , **__UpperCamelCase : Any , ) -> int: super().__init__(**A_ ) _UpperCamelCase = do_rescale _UpperCamelCase = rescale_factor _UpperCamelCase = do_pad _UpperCamelCase = pad_size def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int = None , **__UpperCamelCase : Optional[int] ) -> int: return rescale(A_ , scale=A_ , data_format=A_ , **A_ ) def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str = None ) -> Any: _UpperCamelCase = get_image_size(A_ ) _UpperCamelCase = (old_height // size + 1) * size - old_height _UpperCamelCase = (old_width // size + 1) * size - old_width return pad(A_ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=A_ ) def _UpperCamelCase ( self : str , __UpperCamelCase : int , __UpperCamelCase : str = None , __UpperCamelCase : Tuple = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[Any] = None , __UpperCamelCase : List[Any] = None , __UpperCamelCase : int = ChannelDimension.FIRST , **__UpperCamelCase : str , ) -> List[Any]: _UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale _UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor 
_UpperCamelCase = do_pad if do_pad is not None else self.do_pad _UpperCamelCase = pad_size if pad_size is not None else self.pad_size _UpperCamelCase = make_list_of_images(A_ ) if not valid_images(A_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) # All transformations expect numpy arrays. _UpperCamelCase = [to_numpy_array(A_ ) for image in images] if do_rescale: _UpperCamelCase = [self.rescale(image=A_ , scale=A_ ) for image in images] if do_pad: _UpperCamelCase = [self.pad(A_ , size=A_ ) for image in images] _UpperCamelCase = [to_channel_dimension_format(A_ , A_ ) for image in images] _UpperCamelCase = {"pixel_values": images} return BatchFeature(data=A_ , tensor_type=A_ )
256
from sklearn.metrics import fa_score import datasets __lowerCamelCase : List[Any] = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ __lowerCamelCase : List[Any] = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. 
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. 
])} """ __lowerCamelCase : str = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ): '''simple docstring''' UpperCamelCase : List[str] = fa_score( A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ ) return {"f1": float(A_ ) if score.size == 1 else score}
52
0
"""simple docstring""" from collections import defaultdict from math import gcd def A__ ( UpperCamelCase = 1_500_000 ): A = defaultdict(_lowerCAmelCase ) A = 2 while 2 * euclid_m * (euclid_m + 1) <= limit: for euclid_n in range((euclid_m % 2) + 1 , _lowerCAmelCase , 2 ): if gcd(_lowerCAmelCase , _lowerCAmelCase ) > 1: continue A = 2 * euclid_m * (euclid_m + euclid_n) for perimeter in range(_lowerCAmelCase , limit + 1 , _lowerCAmelCase ): frequencies[perimeter] += 1 euclid_m += 1 return sum(1 for frequency in frequencies.values() if frequency == 1 ) if __name__ == "__main__": print(F"""{solution() = }""")
292
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = KandinskyInpaintPipeline _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase :Dict = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase :Optional[int] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :int = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , 
transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) UpperCamelCase : Optional[int] = MultilingualCLIP(A_ ) UpperCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_text_encoder UpperCamelCase : str = self.dummy_tokenizer UpperCamelCase : List[Any] = self.dummy_unet UpperCamelCase : Optional[Any] = 
self.dummy_movq UpperCamelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Optional[Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa ) UpperCamelCase : str = 0 if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**A_ ) UpperCamelCase : Tuple = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : List[Any] = output.images UpperCamelCase : List[Any] = pipe( **self.get_dummy_inputs(A_ ) , 
return_dict=A_ , )[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCamelCase : Union[str, Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) UpperCamelCase : str = 0 UpperCamelCase : List[Any] = "a hat" UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCamelCase : Optional[Any] = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase 
, UpperCamelCase : Optional[Any] = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : Dict = pipeline( A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCamelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_ , A_ )
52
0
'''simple docstring''' import numpy as np def __UpperCAmelCase ( a_: Optional[Any], a_: str, a_: Optional[Any] = 1e-1_2, a_: Dict = 100, ): assert np.shape(_lowerCAmelCase )[0] == np.shape(_lowerCAmelCase )[1] # Ensure proper dimensionality. assert np.shape(_lowerCAmelCase )[0] == np.shape(_lowerCAmelCase )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(_lowerCAmelCase ) == np.iscomplexobj(_lowerCAmelCase ) _UpperCAmelCase : Optional[int] = np.iscomplexobj(_lowerCAmelCase ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(_lowerCAmelCase, input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. _UpperCAmelCase : str = False _UpperCAmelCase : int = 0 _UpperCAmelCase : Optional[int] = 0 _UpperCAmelCase : Tuple = 1e1_2 while not convergence: # Multiple matrix by the vector. _UpperCAmelCase : Any = np.dot(_lowerCAmelCase, _lowerCAmelCase ) # Normalize the resulting output vector. _UpperCAmelCase : List[str] = w / np.linalg.norm(_lowerCAmelCase ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) _UpperCAmelCase : List[str] = vector.conj().T if is_complex else vector.T _UpperCAmelCase : List[Any] = np.dot(_lowerCAmelCase, np.dot(_lowerCAmelCase, _lowerCAmelCase ) ) # Check convergence. 
_UpperCAmelCase : List[Any] = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: _UpperCAmelCase : str = True _UpperCAmelCase : Union[str, Any] = lambda_ if is_complex: _UpperCAmelCase : Optional[Any] = np.real(lambda_ ) return lambda_, vector def __UpperCAmelCase ( ): _UpperCAmelCase : Any = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) _UpperCAmelCase : str = np.array([41, 4, 20] ) _UpperCAmelCase : Optional[Any] = real_input_matrix.astype(np.complexaaa ) _UpperCAmelCase : Dict = np.triu(1J * complex_input_matrix, 1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T _UpperCAmelCase : Optional[int] = np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": _UpperCAmelCase : int = real_input_matrix _UpperCAmelCase : Any = real_vector elif problem_type == "complex": _UpperCAmelCase : Union[str, Any] = complex_input_matrix _UpperCAmelCase : Tuple = complex_vector # Our implementation. _UpperCAmelCase : List[Any] = power_iteration(_lowerCAmelCase, _lowerCAmelCase ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). _UpperCAmelCase : Optional[int] = np.linalg.eigh(_lowerCAmelCase ) # Last eigenvalue is the maximum one. _UpperCAmelCase : Tuple = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. _UpperCAmelCase : List[Any] = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1e-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(_lowerCAmelCase ) - np.abs(_lowerCAmelCase ) ) <= 1e-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
145
class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = set_counts UpperCamelCase : int = max(A_ ) UpperCamelCase : Optional[Any] = len(A_ ) UpperCamelCase : Union[str, Any] = [1] * num_sets UpperCamelCase : Union[str, Any] = list(range(A_ ) ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Any = self.get_parent(A_ ) UpperCamelCase : Optional[int] = self.get_parent(A_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] UpperCamelCase : int = 0 UpperCamelCase : Dict = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 UpperCamelCase : Optional[int] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] UpperCamelCase : Any = 0 UpperCamelCase : Optional[int] = src_parent UpperCamelCase : int = self.set_counts[src_parent] UpperCamelCase : Any = max(self.max_set , A_ ) return True def __UpperCamelCase( self , A_ ): '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set UpperCamelCase : Optional[int] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
52
0
"""simple docstring""" # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import AutoTokenizer, GPTaConfig def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : Any=0 ) -> Any: """simple docstring""" if name is None: snake_case = None else: snake_case = "." * max(0 , spaces - 2 ) + "# {:" + str(5_0 - spaces ) + "s}" snake_case = fmt.format(_lowerCAmelCase ) # Print and recurse (if needed). 
if isinstance(_lowerCAmelCase , _lowerCAmelCase ): if msg is not None: print(_lowerCAmelCase ) for k in val.keys(): recursive_print(_lowerCAmelCase , val[k] , spaces + 2 ) elif isinstance(_lowerCAmelCase , torch.Tensor ): print(_lowerCAmelCase , ':' , val.size() ) else: print(_lowerCAmelCase , ':' , _lowerCAmelCase ) def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : str ) -> Dict: """simple docstring""" snake_case = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] snake_case = (num_heads, hidden_size, num_splits) + input_shape[1:] snake_case = param.view(*_lowerCAmelCase ) snake_case = param.transpose(0 , 2 ) snake_case = param.transpose(1 , 2 ).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] snake_case = (num_heads, num_splits, hidden_size) + input_shape[1:] snake_case = param.view(*_lowerCAmelCase ) snake_case = param.transpose(0 , 1 ).contiguous() snake_case = param.view(*_lowerCAmelCase ) return param def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> str: """simple docstring""" snake_case = {} # old versions did not store training args snake_case = input_state_dict.get('args' , _lowerCAmelCase ) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) snake_case = ds_args.padded_vocab_size snake_case = ds_args.max_position_embeddings snake_case = ds_args.hidden_size snake_case = ds_args.num_layers snake_case = ds_args.num_attention_heads snake_case = ds_args.ffn_hidden_size # pprint(config) # The number of heads. snake_case = config.n_head # The hidden_size per head. 
snake_case = config.n_embd // config.n_head # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): snake_case = input_state_dict["checkpoint_version"] else: snake_case = 0.0 # The model. snake_case = input_state_dict["model"] # The language model. snake_case = model["language_model"] # The embeddings. snake_case = lm["embedding"] # The word embeddings. snake_case = embeddings["word_embeddings"]["weight"] # Truncate the embedding table to vocab_size rows. snake_case = word_embeddings[: config.vocab_size, :] snake_case = word_embeddings # The position embeddings. snake_case = embeddings["position_embeddings"]["weight"] # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size] snake_case = pos_embeddings.size(0 ) if n_positions != config.n_positions: raise ValueError( f"""pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match""" ) # Store the position embeddings. snake_case = pos_embeddings # The transformer. snake_case = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"] # The regex to extract layer names. snake_case = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' ) # The simple map of names for "automated" rules. snake_case = { "attention.dense": ".attn.c_proj.", "self_attention.dense": ".attn.c_proj.", "mlp.dense_h_to_4h": ".mlp.c_fc.", "mlp.dense_4h_to_h": ".mlp.c_proj.", } # Extract the layers. for key, val in transformer.items(): # Match the name. snake_case = layer_re.match(_lowerCAmelCase ) # Stop if that's not a layer if m is None: break # The index of the layer. snake_case = int(m.group(1 ) ) # The name of the operation. snake_case = m.group(2 ) # Is it a weight or a bias? snake_case = m.group(3 ) # The name of the layer. snake_case = f"""transformer.h.{layer_idx}""" # For layernorm(s), simply store the layer norm. 
if op_name.endswith('layernorm' ): snake_case = "ln_1" if op_name.startswith('input' ) else "ln_2" snake_case = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Insert a tensor of 1x1xDxD bias. snake_case = torch.tril(torch.ones((n_positions, n_positions) , dtype=torch.floataa ) ).view( 1 , 1 , _lowerCAmelCase , _lowerCAmelCase ) snake_case = causal_mask # Insert a "dummy" tensor for masked_bias. snake_case = torch.tensor(-1e4 , dtype=torch.floataa ) snake_case = masked_bias snake_case = fix_query_key_value_ordering(_lowerCAmelCase , _lowerCAmelCase , 3 , _lowerCAmelCase , _lowerCAmelCase ) # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D. snake_case = out_val.transpose(0 , 1 ).contiguous() # Store. snake_case = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": snake_case = fix_query_key_value_ordering(_lowerCAmelCase , _lowerCAmelCase , 3 , _lowerCAmelCase , _lowerCAmelCase ) # Store. No change of shape. snake_case = out_val # Transpose the weights. elif weight_or_bias == "weight": snake_case = megatron_to_transformers[op_name] snake_case = val.transpose(0 , 1 ) # Copy the bias. elif weight_or_bias == "bias": snake_case = megatron_to_transformers[op_name] snake_case = val # DEBUG. assert config.n_layer == layer_idx + 1 # The final layernorm. snake_case = transformer["final_layernorm.weight"] snake_case = transformer["final_layernorm.bias"] # For LM head, transformers' wants the matrix to weight embeddings. snake_case = word_embeddings # It should be done! 
return output_state_dict def lowerCAmelCase__ ( ) -> List[str]: """simple docstring""" snake_case = argparse.ArgumentParser() parser.add_argument('--print-checkpoint-structure' , action='store_true' ) parser.add_argument( 'path_to_checkpoint' , type=_lowerCAmelCase , help='Path to the checkpoint file (.zip archive or direct .pt file)' , ) parser.add_argument( '--config_file' , default='' , type=_lowerCAmelCase , help='An optional config json file describing the pre-trained model.' , ) snake_case = parser.parse_args() # Extract the basename. snake_case = os.path.dirname(args.path_to_checkpoint ) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f"""Extracting PyTorch state dictionary from {args.path_to_checkpoint}""" ) if args.path_to_checkpoint.endswith('.zip' ): with zipfile.ZipFile(args.path_to_checkpoint , 'r' ) as checkpoint: with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict: snake_case = torch.load(_lowerCAmelCase , map_location='cpu' ) else: snake_case = torch.load(args.path_to_checkpoint , map_location='cpu' ) snake_case = input_state_dict.get('args' , _lowerCAmelCase ) # Read the config, or default to the model released by NVIDIA. if args.config_file == "": if ds_args is not None: if ds_args.bias_gelu_fusion: snake_case = "gelu_fast" elif ds_args.openai_gelu: snake_case = "gelu_new" else: snake_case = "gelu" else: # in the very early days this used to be "gelu_new" snake_case = "gelu_new" # Spell out all parameters in case the defaults change. 
snake_case = GPTaConfig( vocab_size=5_0_2_5_7 , n_positions=1_0_2_4 , n_embd=1_0_2_4 , n_layer=2_4 , n_head=1_6 , n_inner=4_0_9_6 , activation_function=_lowerCAmelCase , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , summary_type='cls_index' , summary_use_proj=_lowerCAmelCase , summary_activation=_lowerCAmelCase , summary_proj_to_labels=_lowerCAmelCase , summary_first_dropout=0.1 , scale_attn_weights=_lowerCAmelCase , use_cache=_lowerCAmelCase , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , ) else: snake_case = GPTaConfig.from_json_file(args.config_file ) snake_case = ["GPT2LMHeadModel"] # Convert. print('Converting' ) snake_case = convert_megatron_checkpoint(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(_lowerCAmelCase , _lowerCAmelCase ) # Add tokenizer class info to config # see https://github.com/huggingface/transformers/issues/13906) if ds_args is not None: snake_case = ds_args.tokenizer_type if tokenizer_type == "GPT2BPETokenizer": snake_case = "gpt2" elif tokenizer_type == "PretrainedFromHF": snake_case = ds_args.tokenizer_name_or_path else: raise ValueError(f"""Unrecognized tokenizer_type {tokenizer_type}""" ) else: snake_case = "gpt2" snake_case = AutoTokenizer.from_pretrained(_lowerCAmelCase ) snake_case = type(_lowerCAmelCase ).__name__ snake_case = tokenizer_class # Store the config to file. print('Saving config' ) config.save_pretrained(_lowerCAmelCase ) # Save tokenizer based on args print(f"""Adding {tokenizer_class} tokenizer files""" ) tokenizer.save_pretrained(_lowerCAmelCase ) # Store the state_dict to file. 
snake_case = os.path.join(_lowerCAmelCase , 'pytorch_model.bin' ) print(f"""Saving checkpoint to \"{output_checkpoint_file}\"""" ) torch.save(_lowerCAmelCase , _lowerCAmelCase ) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
150
# ELECTRA package init: heavyweight framework backends (PyTorch / TensorFlow /
# Flax) are imported lazily, only when one of their names is first accessed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Submodule name -> list of public names it exports.  NOTE: the previous code
# rebound this variable to a fresh list in every optional-dependency branch and
# never defined the `_import_structure` name handed to `_LazyModule` below, so
# importing the package raised NameError.  Entries are now accumulated here.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

# Fast tokenizer only when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

# PyTorch implementations.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

# TensorFlow implementations.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

# Flax implementations.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    # Static type checkers see the real imports so names resolve normally.
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy driven by `_import_structure`.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
52
0
'''simple docstring''' import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def a__ ( lowercase : Optional[int] ) -> Optional[Any]: """simple docstring""" return EnvironmentCommand() def a__ ( lowercase : List[Any] ) -> List[str]: """simple docstring""" return EnvironmentCommand(args.accelerate_config_file ) class __lowerCAmelCase ( __snake_case ): """simple docstring""" @staticmethod def snake_case__ ( lowerCAmelCase__ : Union[str, Any] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = parser.add_parser('''env''' ) download_parser.set_defaults(func=A_ ) download_parser.add_argument( '''--accelerate-config_file''' , default=A_ , help='''The accelerate config file to use for the default values in the launching script.''' , ) download_parser.set_defaults(func=A_ ) def __init__( self : List[Any] , lowerCAmelCase__ : List[Any] , *lowerCAmelCase__ : int ) -> Optional[Any]: '''simple docstring''' _UpperCamelCase = accelerate_config_file def snake_case__ ( self : Optional[Any] ) -> List[Any]: '''simple docstring''' _UpperCamelCase = "not installed" if is_safetensors_available(): import safetensors _UpperCamelCase = safetensors.__version__ elif importlib.util.find_spec('''safetensors''' ) is not None: import safetensors _UpperCamelCase = f"""{safetensors.__version__} but is ignored because of PyTorch version too old.""" _UpperCamelCase = "not installed" _UpperCamelCase = "not found" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file _UpperCamelCase = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(A_ ): _UpperCamelCase = load_config_from_file(self._accelerate_config_file ).to_dict() _UpperCamelCase = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(A_ , A_ ) else f"""\t{accelerate_config}""" ) _UpperCamelCase = "not installed" _UpperCamelCase = "NA" if is_torch_available(): import torch _UpperCamelCase = torch.__version__ _UpperCamelCase = torch.cuda.is_available() _UpperCamelCase = "not installed" _UpperCamelCase = "NA" if is_tf_available(): import tensorflow as tf _UpperCamelCase = tf.__version__ try: # deprecated in v2.1 _UpperCamelCase = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool _UpperCamelCase = bool(tf.config.list_physical_devices('''GPU''' ) ) _UpperCamelCase = "not installed" _UpperCamelCase = "not installed" _UpperCamelCase = "not installed" _UpperCamelCase = "NA" if is_flax_available(): import flax import jax import jaxlib _UpperCamelCase = flax.__version__ _UpperCamelCase = jax.__version__ _UpperCamelCase = jaxlib.__version__ _UpperCamelCase = jax.lib.xla_bridge.get_backend().platform _UpperCamelCase = { "`transformers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface_hub version": huggingface_hub.__version__, "Safetensors version": f"""{safetensors_version}""", "Accelerate version": f"""{accelerate_version}""", "Accelerate config": f"""{accelerate_config_str}""", "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "Tensorflow version (GPU?)": f"""{tf_version} ({tf_cuda_available})""", "Flax version (CPU?/GPU?/TPU?)": f"""{flax_version} ({jax_backend})""", "Jax version": f"""{jax_version}""", "JaxLib version": f"""{jaxlib_version}""", "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT 
the two last points.\n''' ) print(self.format_dict(A_ ) ) return info @staticmethod def snake_case__ ( lowerCAmelCase__ : str ) -> str: '''simple docstring''' return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
324
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class A__(BackboneConfigMixin, PretrainedConfig):
    """Configuration for a ConvNeXt-V2 model/backbone.

    Holds the architectural hyper-parameters (stage depths, hidden sizes,
    patch size, ...).  All values are stored as instance attributes so that
    `PretrainedConfig` serialization round-trips them; the previous code
    assigned every value to a throwaway local instead of `self`, and repeated
    one obfuscated parameter name for every argument (a SyntaxError) —
    parameter names are restored from the body's own references.
    """

    # Required by the PretrainedConfig registry machinery.
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        # Defaults mirror the ConvNeXt-V2 tiny architecture.
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        # "stem" plus one named stage per entry in `depths`.
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
52
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


a__: Optional[int] = logging.get_logger(__name__)

FNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/config.json",
    "google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/config.json"
    # See all FNet models at https://huggingface.co/models?filter=fnet
}


class SCREAMING_SNAKE_CASE__(PretrainedConfig):
    """Configuration for an FNet model.

    The previous signature repeated one obfuscated parameter name for every
    argument (a SyntaxError) and bound each value to a throwaway local;
    parameter names are restored from the body's references and the values
    are stored on `self` so `PretrainedConfig` serialization works.
    """

    # Required by the PretrainedConfig registry machinery.
    model_type = "fnet"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=768,
        num_hidden_layers=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=4,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_tpu_fourier_optimizations=False,
        tpu_short_seq_length=512,
        pad_token_id=3,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # TPU-specific Fourier-transform optimizations toggle and the short
        # sequence length they apply to.
        self.use_tpu_fourier_optimizations = use_tpu_fourier_optimizations
        self.tpu_short_seq_length = tpu_short_seq_length
193
"""Tests for the `offline` network-simulation helper."""
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT mode: requests hang until a timeout fires."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        # Without a timeout the simulated request would hang forever; the
        # helper raises instead of blocking the test suite.
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS mode: every request fails fast."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    """HF_DATASETS_OFFLINE=1: datasets' own helpers refuse to hit the network."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # NOTE(review): datasets raises its OfflineModeIsEnabled error here,
        # which is a ConnectionError subclass — confirm against datasets'
        # file_utils if the exact type matters.
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
52
0
"""Checks that every config class docstring mentions a valid checkpoint link."""
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes that are allowed to have no checkpoint in their docstring.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose doc link matches its hub URL, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing all config classes lacking a valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
305
# MMBT package init with lazy loading of the torch-only modeling module.
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> exported names.  NOTE: the previous code rebound this mapping's
# variable and never defined the `_import_structure` name used by `_LazyModule`
# below, which made importing the package raise NameError.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    # Static type checkers see the real imports so names resolve normally.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace this module with a lazy proxy driven by `_import_structure`.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
52
0
def stooge_sort(arr):
    """Sort ``arr`` in place using stooge sort and return it.

    Stooge sort is a deliberately inefficient recursive algorithm
    (~O(n^2.71)); it is kept for educational purposes only.
    """
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr, i, h):
    """Recursively stooge-sort the inclusive slice ``arr[i:h + 1]`` in place."""
    if i >= h:
        return

    # If the first element is larger than the last, swap them.  (The previous
    # code assigned the pair to a throwaway local instead of swapping, so the
    # array was never reordered.)
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the range, sort overlapping
    # two-thirds chunks: first 2/3, last 2/3, then first 2/3 again.
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        stooge(arr, i, h - t)
        stooge(arr, i + t, h)
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(stooge_sort(unsorted))
207
# WIKI_SPLIT metric: combines SARI (text simplification), sacreBLEU and exact
# match.  NOTE(review): this file has been mechanically obfuscated — several
# defs reuse one parameter name (a SyntaxError), locals are collapsed into
# `UpperCamelCase`, and many referenced names (`normalize_answer`, `SARIngram`,
# `_DESCRIPTION`, ...) are never bound.  Code is preserved as-is below;
# comments flag the breakage rather than guess at reconstructions.
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


# BibTeX citation for the SARI and sacreBLEU papers.
__lowerCamelCase : List[Any] = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415
},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""

# Short human-readable description of the metric.
__lowerCamelCase : Optional[int] = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

# Usage/arguments docstring attached to the Metric class below.
__lowerCamelCase : str = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


# Presumably `normalize_answer`: lowercase, strip punctuation/articles,
# collapse whitespace.  NOTE(review): inner helpers reference `text` and
# `exclude`, which are never bound (obfuscation damage) — NameError at runtime.
def A_ ( _lowerCAmelCase ) -> str:
    def remove_articles(_lowerCAmelCase ):
        # NOTE(review): compiled regex is bound to a dead local; re.sub is
        # called with the input text as the pattern.
        UpperCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE )
        return re.sub(_lowerCAmelCase , " " , _lowerCAmelCase )

    def white_space_fix(_lowerCAmelCase ):
        return " ".join(text.split() )

    def remove_punc(_lowerCAmelCase ):
        UpperCamelCase : int = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(_lowerCAmelCase ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )


# Presumably `compute_exact`.  NOTE(review): duplicate parameter names are a
# SyntaxError, and `normalize_answer` is not defined at module level.
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any:
    return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) )


# Presumably `compute_em`: % of predictions exactly matching any reference.
# NOTE(review): same duplicate-parameter SyntaxError; `compute_exact` unbound.
def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str:
    UpperCamelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )]
    return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100


# Presumably `SARIngram(sgrams, cgrams, rgramslist, numref)`: per-n-gram KEEP /
# DELETE / ADD F-scores as in SARI.py.  NOTE(review): every local was collapsed
# into `UpperCamelCase`, so the dataflow (sgramcounter, rgramcounter, ...) is
# destroyed and most names below are unbound — do not trust this body.
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]:
    UpperCamelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
    UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase )
    UpperCamelCase : Optional[int] = Counter(_lowerCAmelCase )
    UpperCamelCase : List[Any] = Counter()
    for sgram, scount in sgramcounter.items():
        UpperCamelCase : Tuple = scount * numref
    UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase )
    UpperCamelCase : Tuple = Counter()
    for cgram, ccount in cgramcounter.items():
        UpperCamelCase : Dict = ccount * numref

    # KEEP
    UpperCamelCase : List[Any] = sgramcounter_rep & cgramcounter_rep
    UpperCamelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter
    UpperCamelCase : Dict = sgramcounter_rep & rgramcounter
    UpperCamelCase : Optional[int] = 0
    UpperCamelCase : Tuple = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscorea += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    UpperCamelCase : Any = 1
    UpperCamelCase : Any = 1
    if len(_lowerCAmelCase ) > 0:
        UpperCamelCase : Dict = keeptmpscorea / len(_lowerCAmelCase )
    if len(_lowerCAmelCase ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        UpperCamelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
    UpperCamelCase : Any = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        UpperCamelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    UpperCamelCase : Any = sgramcounter_rep - cgramcounter_rep
    UpperCamelCase : str = delgramcounter_rep - rgramcounter
    UpperCamelCase : Any = sgramcounter_rep - rgramcounter
    UpperCamelCase : Optional[int] = 0
    UpperCamelCase : Union[str, Any] = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    UpperCamelCase : Dict = 1
    if len(_lowerCAmelCase ) > 0:
        UpperCamelCase : str = deltmpscorea / len(_lowerCAmelCase )

    # ADDITION
    UpperCamelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
    UpperCamelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase )
    UpperCamelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
    UpperCamelCase : Optional[Any] = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    UpperCamelCase : Tuple = 1
    UpperCamelCase : Tuple = 1
    if len(_lowerCAmelCase ) > 0:
        UpperCamelCase : Dict = addtmpscore / len(_lowerCAmelCase )
    if len(_lowerCAmelCase ) > 0:
        UpperCamelCase : Tuple = addtmpscore / len(_lowerCAmelCase )
    UpperCamelCase : List[str] = 0
    if addscore_precision > 0 or addscore_recall > 0:
        UpperCamelCase : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


# Presumably `SARIsent(ssent, csent, rsents)`: builds 1..4-gram lists for the
# source, candidate and references, scores each order with SARIngram, then
# averages.  NOTE(review): same obfuscation damage; the placement of the three
# trailing `ragramslist.append` calls inside the `rsent` loop is an assumption
# (flattened indentation) — confirm against the upstream SARI.py.
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
    UpperCamelCase : int = len(_lowerCAmelCase )
    UpperCamelCase : Optional[Any] = ssent.split(" " )
    UpperCamelCase : Dict = csent.split(" " )
    UpperCamelCase : str = []
    UpperCamelCase : Any = []
    UpperCamelCase : Any = []
    UpperCamelCase : Union[str, Any] = []
    UpperCamelCase : str = []
    UpperCamelCase : str = []
    UpperCamelCase : Dict = []
    UpperCamelCase : int = []
    UpperCamelCase : Optional[Any] = []
    UpperCamelCase : Tuple = []
    for rsent in rsents:
        UpperCamelCase : List[Any] = rsent.split(" " )
        UpperCamelCase : List[str] = []
        UpperCamelCase : int = []
        UpperCamelCase : Tuple = []
        ragramslist.append(_lowerCAmelCase )
        for i in range(0 , len(_lowerCAmelCase ) - 1 ):
            if i < len(_lowerCAmelCase ) - 1:
                UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1]
                ragrams.append(_lowerCAmelCase )
            if i < len(_lowerCAmelCase ) - 2:
                UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
                ragrams.append(_lowerCAmelCase )
            if i < len(_lowerCAmelCase ) - 3:
                UpperCamelCase : List[Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
                ragrams.append(_lowerCAmelCase )
        ragramslist.append(_lowerCAmelCase )
        ragramslist.append(_lowerCAmelCase )
        ragramslist.append(_lowerCAmelCase )

    for i in range(0 , len(_lowerCAmelCase ) - 1 ):
        if i < len(_lowerCAmelCase ) - 1:
            UpperCamelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1]
            sagrams.append(_lowerCAmelCase )
        if i < len(_lowerCAmelCase ) - 2:
            UpperCamelCase : List[str] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
            sagrams.append(_lowerCAmelCase )
        if i < len(_lowerCAmelCase ) - 3:
            UpperCamelCase : Optional[int] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
            sagrams.append(_lowerCAmelCase )

    for i in range(0 , len(_lowerCAmelCase ) - 1 ):
        if i < len(_lowerCAmelCase ) - 1:
            UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1]
            cagrams.append(_lowerCAmelCase )
        if i < len(_lowerCAmelCase ) - 2:
            UpperCamelCase : Optional[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
            cagrams.append(_lowerCAmelCase )
        if i < len(_lowerCAmelCase ) - 3:
            UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
            cagrams.append(_lowerCAmelCase )

    # NOTE(review): annotated parenthesized tuple targets are a SyntaxError;
    # these were `keep1score, del1score, add1score = SARIngram(...)` etc.
    ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : str = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    UpperCamelCase : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
    UpperCamelCase : str = sum([delascore, delascore, delascore, delascore] ) / 4
    UpperCamelCase : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4
    UpperCamelCase : Union[str, Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


# Presumably `normalize(sentence, lowercase=True, tokenizer="13a", return_str=True)`.
def A_ ( _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = "13a" , _lowerCAmelCase = True ) -> Optional[Any]:
    # Normalization is requried for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        UpperCamelCase : Dict = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            UpperCamelCase : str = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase )
        else:
            UpperCamelCase : Dict = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase )
    elif tokenizer == "moses":
        UpperCamelCase : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase , escape=_lowerCAmelCase )
    elif tokenizer == "penn":
        UpperCamelCase : str = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase )
    else:
        UpperCamelCase : Union[str, Any] = sentence
    if not return_str:
        UpperCamelCase : Tuple = normalized_sent.split()
    return normalized_sent


# Presumably `compute_sari(sources, predictions, references)`.
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]:
    if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )):
        raise ValueError("Sources length must match predictions and references lengths." )
    UpperCamelCase : Optional[Any] = 0
    for src, pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        sari_score += SARIsent(normalize(_lowerCAmelCase ) , normalize(_lowerCAmelCase ) , [normalize(_lowerCAmelCase ) for sent in refs] )
    UpperCamelCase : Optional[int] = sari_score / len(_lowerCAmelCase )
    return 100 * sari_score


# Presumably `compute_sacrebleu(predictions, references, ...)`.
def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> List[str]:
    UpperCamelCase : Optional[Any] = len(references[0] )
    if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
        raise ValueError("Sacrebleu requires the same number of references for each prediction" )
    UpperCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
    UpperCamelCase : Tuple = sacrebleu.corpus_bleu(
        _lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , )
    return output.score


# Metric wrapper combining SARI, sacreBLEU and exact-match.
# NOTE(review): `_DESCRIPTION`, `_KWARGS_DESCRIPTION` and `_CITATION` are
# unbound (the strings above were renamed), and the duplicate `A_` parameters
# in `_compute` are a SyntaxError.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    def __UpperCamelCase( self ):
        """Declare metric metadata: features, citation and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
                } ) ,
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ] ,
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ] , )

    def __UpperCamelCase( self , A_ , A_ , A_ ):
        """Compute {sari, sacrebleu, exact} for the given sources/predictions/references."""
        UpperCamelCase : Optional[Any] = {}
        result.update({"sari": compute_sari(sources=A_ , predictions=A_ , references=A_ )} )
        result.update({"sacrebleu": compute_sacrebleu(predictions=A_ , references=A_ )} )
        result.update({"exact": compute_em(predictions=A_ , references=A_ )} )
        return result
52
0
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position UpperCAmelCase__ = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # 
isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip UpperCAmelCase__ = concatenate_datasets UpperCAmelCase__ = DownloadConfig UpperCAmelCase__ = DownloadManager UpperCAmelCase__ = DownloadMode UpperCAmelCase__ = DownloadConfig UpperCAmelCase__ = DownloadMode UpperCAmelCase__ = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
289
# NOTE(review): collapsed paste of a RoBERTa configuration module; reflowed,
# code tokens unchanged. Identifiers were mangled (duplicate `A_` parameters,
# one reused assignment target) — flagged inline rather than repaired.

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


__lowerCamelCase : List[Any] = logging.get_logger(__name__)

# Map of pretrained checkpoint name -> hosted config URL.
__lowerCamelCase : str = {
    """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/config.json""",
    """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/config.json""",
    """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/config.json""",
    """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/config.json""",
    """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json""",
    """roberta-large-openai-detector""": """https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json""",
}


class A__ ( __snake_case ):
    # Model-type tag, presumably consumed by the auto-config machinery.
    _UpperCAmelCase :Union[str, Any] = 'roberta'

    # NOTE(review): every parameter is mangled to `A_` (a duplicate-argument
    # SyntaxError as written) and the assignments below read names
    # (`vocab_size`, ...) that are never bound — the original parameter list
    # was clearly lost in mangling. TODO: restore real parameter names.
    def __init__( self , A_=5_0265 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=2 , A_=0.02 , A_=1e-12 , A_=1 , A_=0 , A_=2 , A_="absolute" , A_=True , A_=None , **A_ , ):
        '''Store the RoBERTa hyper-parameters on the configuration instance.'''
        super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
        UpperCamelCase : Optional[int] = vocab_size
        UpperCamelCase : Dict = hidden_size
        UpperCamelCase : str = num_hidden_layers
        UpperCamelCase : Any = num_attention_heads
        UpperCamelCase : List[str] = hidden_act
        UpperCamelCase : Optional[Any] = intermediate_size
        UpperCamelCase : Tuple = hidden_dropout_prob
        UpperCamelCase : Tuple = attention_probs_dropout_prob
        UpperCamelCase : Tuple = max_position_embeddings
        UpperCamelCase : Any = type_vocab_size
        UpperCamelCase : int = initializer_range
        UpperCamelCase : str = layer_norm_eps
        UpperCamelCase : Dict = position_embedding_type
        UpperCamelCase : Any = use_cache
        UpperCamelCase : Union[str, Any] = classifier_dropout


class A__ ( __snake_case ):
    # NOTE(review): this second class shadows the first `A__` at module scope
    # (both were presumably distinct names — e.g. RobertaConfig and an ONNX
    # config — before mangling).

    @property
    def __UpperCamelCase( self ):
        '''Return the ONNX dynamic-axes mapping for the current task.'''
        if self.task == "multiple-choice":
            UpperCamelCase : Optional[int] = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            UpperCamelCase : Optional[int] = {0: "batch", 1: "sequence"}
        # NOTE(review): `dynamic_axis` is unbound here — the two branches above
        # were presumably meant to bind it. TODO confirm against upstream.
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
52
0
'''simple docstring'''
# NOTE(review): collapsed paste of a score-SDE scheduler module; reflowed,
# code tokens unchanged. Signatures use a duplicated parameter name `A`
# (SyntaxError as written) and bodies read unbound names — flagged inline.

# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch

import math
from typing import Union

import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin


class _a ( __snake_case , __snake_case ):
    '''Continuous-time score-based (VP-SDE style) scheduler; identifiers mangled.'''

    # Scheduler order/version tag.
    A : Dict = 1

    @register_to_config
    def __init__( self, A=2_000, A=0.1, A=20, A=1E-3 ):
        '''Initialise lazily-populated scheduler state; config values are
        captured by the @register_to_config decorator.'''
        # NOTE(review): three distinct attributes were presumably initialised
        # to None here (e.g. sigmas / discrete_sigmas / timesteps); the target
        # names were all mangled to `SCREAMING_SNAKE_CASE`. TODO confirm.
        SCREAMING_SNAKE_CASE : List[Any] = None
        SCREAMING_SNAKE_CASE : Any = None
        SCREAMING_SNAKE_CASE : Dict = None

    def UpperCamelCase_ ( self, A, A = None ):
        '''Build a descending timestep grid from 1.0 down to `sampling_eps`.'''
        SCREAMING_SNAKE_CASE : Dict = torch.linspace(1, self.config.sampling_eps, A_, device=A_ )

    def UpperCamelCase_ ( self, A, A, A, A=None ):
        '''One reverse-SDE Euler step: postprocess the model score, form the
        drift/diffusion terms, and add the noise contribution.
        NOTE(review): body references `t`, `score`, `x`, `log_mean_coeff`,
        `std`, `beta_t`, `drift`, `diffusion`, `dt`, `x_mean` — all unbound
        after mangling.'''
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler'
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        SCREAMING_SNAKE_CASE : int = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        SCREAMING_SNAKE_CASE : List[str] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        SCREAMING_SNAKE_CASE : List[str] = std.flatten()
        # Broadcast std up to the score's rank before dividing.
        while len(std.shape ) < len(score.shape ):
            SCREAMING_SNAKE_CASE : Optional[Any] = std.unsqueeze(-1 )
        SCREAMING_SNAKE_CASE : Union[str, Any] = -score / std

        # compute
        SCREAMING_SNAKE_CASE : str = -1.0 / len(self.timesteps )

        SCREAMING_SNAKE_CASE : Optional[int] = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        SCREAMING_SNAKE_CASE : Tuple = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            SCREAMING_SNAKE_CASE : Optional[int] = beta_t.unsqueeze(-1 )
        SCREAMING_SNAKE_CASE : List[Any] = -0.5 * beta_t * x

        SCREAMING_SNAKE_CASE : List[Any] = torch.sqrt(A_ )
        SCREAMING_SNAKE_CASE : Any = drift - diffusion**2 * score
        SCREAMING_SNAKE_CASE : List[Any] = x + drift * dt

        # add noise
        SCREAMING_SNAKE_CASE : str = randn_tensor(x.shape, layout=x.layout, generator=A_, device=x.device, dtype=x.dtype )
        SCREAMING_SNAKE_CASE : Union[str, Any] = x_mean + diffusion * math.sqrt(-dt ) * noise

        return x, x_mean

    def __len__( self ):
        '''Number of training timesteps configured for this scheduler.'''
        return self.config.num_train_timesteps
251
# NOTE(review): collapsed paste of a DanceDiffusion-style audio generation
# pipeline; reflowed, code tokens unchanged. Mangled parameters (`A_`) leave
# many body names unbound — flagged inline.

from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


__lowerCamelCase : str = logging.get_logger(__name__)  # pylint: disable=invalid-name


class A__ ( __snake_case ):
    def __init__( self , A_ , A_ ):
        '''Register the UNet and scheduler modules on the pipeline.'''
        super().__init__()
        self.register_modules(unet=A_ , scheduler=A_ )

    @torch.no_grad()
    def __call__( self , A_ = 1 , A_ = 100 , A_ = None , A_ = None , A_ = True , ):
        '''Generate raw audio by iterative denoising from random noise.
        NOTE(review): all parameters are mangled to `A_` (duplicate-argument
        SyntaxError); the body reads audio_length_in_s / batch_size /
        return_dict etc., which are unbound as written.'''
        if audio_length_in_s is None:
            UpperCamelCase : str = self.unet.config.sample_size / self.unet.config.sample_rate

        UpperCamelCase : Optional[Any] = audio_length_in_s * self.unet.config.sample_rate

        # Each up-block halves resolution, so lengths must be a multiple of this.
        UpperCamelCase : Any = 2 ** len(self.unet.up_blocks )
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                F"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                F""" {3 * down_scale_factor / self.unet.config.sample_rate}."""
            )

        UpperCamelCase : Union[str, Any] = int(A_ )
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple the UNet can handle; trimmed later.
            UpperCamelCase : List[str] = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                F"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                F""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                " process."
            )
        UpperCamelCase : Any = int(A_ )

        UpperCamelCase : Union[str, Any] = next(iter(self.unet.parameters() ) ).dtype
        UpperCamelCase : Optional[int] = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(A_ , A_ ) and len(A_ ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(A_ )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )

        UpperCamelCase : Optional[Any] = randn_tensor(A_ , generator=A_ , device=self.device , dtype=A_ )

        # set step values
        self.scheduler.set_timesteps(A_ , device=audio.device )
        UpperCamelCase : Optional[int] = self.scheduler.timesteps.to(A_ )

        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            UpperCamelCase : Dict = self.unet(A_ , A_ ).sample

            # 2. compute previous image: x_t -> t_t-1
            UpperCamelCase : int = self.scheduler.step(A_ , A_ , A_ ).prev_sample

        UpperCamelCase : Optional[Any] = audio.clamp(-1 , 1 ).float().cpu().numpy()

        # Trim the padding added above back to the originally requested length.
        UpperCamelCase : Dict = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=A_ )
52
0
'''simple docstring'''
# NOTE(review): collapsed paste of a distributed-Trainer test script; reflowed,
# code tokens unchanged. Identifier mangling (classes all named
# `UpperCamelCase_`, locals rebound to one name) leaves many names unbound —
# flagged inline rather than repaired.

from typing import Dict

from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
    TestCasePlus,
    execute_subprocess_async,
    get_torch_dist_unique_port,
    require_torch_multi_gpu,
    require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging


a : Dict = logging.get_logger(__name__)


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    from transformers import Trainer

    class UpperCamelCase_ ( __snake_case ):
        # Fixed-length dummy dataset whose items are their own indices.
        def __init__( self , A = 101 ) -> int:
            # NOTE(review): reads parameter `length` that mangling renamed to `A`.
            UpperCAmelCase : Dict = length

        def __len__( self ) -> str:
            return self.length

        def __getitem__( self , A ) -> Optional[Any]:
            # NOTE(review): `i` is unbound — presumably the mangled `A` parameter.
            return i

    class UpperCamelCase_ :
        # Dummy collator: wraps the batch indices as both inputs and labels.
        def __call__( self , A ) -> List[Any]:
            return {"input_ids": torch.tensor(A_ ), "labels": torch.tensor(A_ )}

    class UpperCamelCase_ ( nn.Module ):
        # Dummy model that echoes its inputs back (identity "prediction").
        def __init__( self ) -> Union[str, Any]:
            super().__init__()
            # Add some (unused) params otherwise DDP will complain.
            UpperCAmelCase : Optional[int] = nn.Linear(120 , 80 )

        def _lowercase( self , A , A=None ) -> Dict:
            # NOTE(review): duplicate `A` parameters; body reads the original
            # `input_ids` / `labels` names, unbound after mangling.
            if labels is not None:
                return torch.tensor(0.0 , device=input_ids.device ), input_ids
            else:
                return input_ids

    class UpperCamelCase_ ( __snake_case ):
        @require_torch_neuroncore
        def _lowercase( self ) -> Union[str, Any]:
            '''Launch this script under torchrun with 2 processes (Neuroncore).'''
            UpperCAmelCase : List[Any] = f'''--nproc_per_node=2 --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split()
            UpperCAmelCase : Optional[int] = self.get_auto_remove_tmp_dir()
            UpperCAmelCase : List[str] = f'''--output_dir {output_dir}'''.split()
            UpperCAmelCase : Optional[int] = ["torchrun"] + distributed_args + args
            execute_subprocess_async(A_ , env=self.get_env() )
            # successful return here == success - any errors would have caused an error in the sub-call

    class UpperCamelCase_ ( __snake_case ):
        @require_torch_multi_gpu
        def _lowercase( self ) -> List[Any]:
            '''Launch this script under torchrun across all visible GPUs.'''
            UpperCAmelCase : List[Any] = f'''--nproc_per_node={torch.cuda.device_count()} --master_port={get_torch_dist_unique_port()} {self.test_file_dir}/test_trainer_distributed.py '''.split()
            UpperCAmelCase : Optional[Any] = self.get_auto_remove_tmp_dir()
            UpperCAmelCase : List[str] = f'''--output_dir {output_dir}'''.split()
            UpperCAmelCase : Optional[Any] = ["torchrun"] + distributed_args + args
            execute_subprocess_async(A_ , env=self.get_env() )
            # successful return here == success - any errors would have caused an error in the sub-call


if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    a : List[str] = HfArgumentParser((TrainingArguments,))
    # NOTE(review): reads `parser`, `training_args`, etc. — all rebound to the
    # single mangled name `a` above, so unbound as written.
    a : List[str] = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
        F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [1_0_1, 4_0, 7]:
        a : str = DummyDataset(dataset_length)

        def __lowerCamelCase ( _lowercase ) -> Dict:
            # Verify predictions/labels come back as the sequential indices.
            UpperCAmelCase : Optional[Any] = list(range(len(_lowerCAmelCase ) ) )
            UpperCAmelCase : List[Any] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    """Predictions and/or labels do not match expected results:\n - predictions: """
                    F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}'''
                )
            return {"success": success}

        a : str = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        a : Any = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        a : List[Any] = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        a : Optional[int] = 2
        a : Tuple = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        a : Tuple = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        a : Dict = None
265
import functools


def A_(worda: str, wordb: str) -> int:
    """Return the Levenshtein edit distance between *worda* and *wordb*.

    The distance is the minimum number of single-character insertions,
    deletions, and substitutions needed to turn ``worda`` into ``wordb``.

    FIX(review): the original declared both parameters with the same mangled
    name (a SyntaxError) and the body referenced never-bound names
    (``worda``/``indexa``/``diff``); the intended two-word memoized recursion
    is restored here.

    Args:
        worda: first string.
        wordb: second string.

    Returns:
        The edit distance as a non-negative int.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    # Memoized top-down recursion over the pair of cursor positions; the
    # cache bounds the work to O(len_worda * len_wordb) subproblems.
    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # First word exhausted: insert the remainder of the second word.
        if indexa >= len_worda:
            return len_wordb - indexb
        # Second word exhausted: delete the remainder of the first word.
        if indexb >= len_wordb:
            return len_worda - indexa
        # 0 when the current characters match, 1 when a substitution is needed.
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from worda
            1 + min_distance(indexa, indexb + 1),  # insert into worda
            diff + min_distance(indexa + 1, indexb + 1),  # match / substitute
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
52
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { """studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""", """studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""", } class UpperCAmelCase_ ( __snake_case): snake_case__ = 'luke' def __init__( self : str , __UpperCamelCase : Optional[int]=5_0267 , __UpperCamelCase : Dict=50_0000 , __UpperCamelCase : List[Any]=768 , __UpperCamelCase : Union[str, Any]=256 , __UpperCamelCase : Dict=12 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : str=3072 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Dict=512 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : Optional[Any]=1E-12 , __UpperCamelCase : List[str]=True , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[Any]=2 , **__UpperCamelCase : Tuple , ) -> Optional[Any]: super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ ) _UpperCamelCase = vocab_size _UpperCamelCase = entity_vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = entity_emb_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = hidden_act _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = use_entity_aware_attention _UpperCamelCase = classifier_dropout
256
# NOTE(review): collapsed paste of the AST feature-extractor test module;
# reflowed, code tokens unchanged. Mangled identifiers leave several unbound
# names (flagged inline rather than repaired).

import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


__lowerCamelCase : str = random.Random()

if is_torch_available():
    import torch


def A_ ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]:
    # Build a nested list of random floats of the given 2-D shape.
    # NOTE(review): all four parameters share one mangled name (SyntaxError);
    # the body reads `rng` / `shape` / `scale` / `values`, which are unbound.
    if rng is None:
        UpperCamelCase : Optional[int] = global_rng
    UpperCamelCase : Optional[Any] = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values


class A__ ( unittest.TestCase ):
    def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ):
        '''Collect feature-extractor construction parameters for the tests.
        NOTE(review): parameters mangled to `A_`; body reads the unbound
        original names (parent / batch_size / ...).'''
        UpperCamelCase : Tuple = parent
        UpperCamelCase : List[Any] = batch_size
        UpperCamelCase : List[Any] = min_seq_length
        UpperCamelCase : List[str] = max_seq_length
        UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        UpperCamelCase : Union[str, Any] = feature_size
        UpperCamelCase : List[str] = padding_value
        UpperCamelCase : Optional[Any] = sampling_rate
        UpperCamelCase : List[str] = return_attention_mask
        UpperCamelCase : List[Any] = do_normalize

    def __UpperCamelCase( self ):
        '''Return the kwargs dict used to construct the feature extractor.'''
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def __UpperCamelCase( self , A_=False , A_=False ):
        '''Build a batch of synthetic speech inputs of equal or growing lengths.'''

        def _flatten(A_ ):
            return list(itertools.chain(*A_ ) )

        if equal_length:
            UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            UpperCamelCase : Dict = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class A__ ( __snake_case , unittest.TestCase ):
    # Class under test for the shared sequence-feature-extraction mixin.
    _UpperCAmelCase :Optional[Any] = ASTFeatureExtractor

    def __UpperCamelCase( self ):
        '''Instantiate the shared tester helper.'''
        UpperCamelCase : Tuple = ASTFeatureExtractionTester(self )

    def __UpperCamelCase( self ):
        '''Exercise numpy call paths: unbatched, batched, and 2-D array inputs.'''
        UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        UpperCamelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs]

        # Test not batched input
        UpperCamelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

        # Test batched
        UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values
        UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values
        # NOTE(review): both loop targets were mangled to `enc_seq_a`.
        for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
            self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        UpperCamelCase : int = np.asarray(A_ )
        UpperCamelCase : Any = feat_extract(A_ , return_tensors="np" ).input_values
        UpperCamelCase : List[str] = feat_extract(A_ , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
            self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

    @require_torch
    def __UpperCamelCase( self ):
        '''Check that padding preserves float32 dtype for np and pt outputs.'''
        import torch

        UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # NOTE(review): `np.floataa` / `torch.floataa` look like mangled
        # `float32` — TODO confirm.
        UpperCamelCase : int = np.random.rand(100 ).astype(np.floataa )
        UpperCamelCase : str = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            UpperCamelCase : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            UpperCamelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def __UpperCamelCase( self , A_ ):
        '''Load decoded audio arrays from the dummy LibriSpeech dataset.'''
        from datasets import load_dataset

        UpperCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        UpperCamelCase : Any = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def __UpperCamelCase( self ):
        '''Integration check against a slice of known-good feature values.'''
        # fmt: off — reference values for the first 30 features of frame 0.
        UpperCamelCase : Any = torch.tensor(
            [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76,
             -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03,
             -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] )
        # fmt: on
        UpperCamelCase : List[Any] = self._load_datasamples(1 )
        UpperCamelCase : Tuple = ASTFeatureExtractor()
        UpperCamelCase : str = feature_extractor(A_ , return_tensors="pt" ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
52
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...utils import logging, randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline _snake_case : str = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( __snake_case ): def __init__( self :str , __UpperCamelCase :Dict , __UpperCamelCase :Optional[Any] ): super().__init__() self.register_modules(unet=A_ , scheduler=A_ ) @torch.no_grad() def __call__( self :Union[str, Any] , __UpperCamelCase :List[str] = 1 , __UpperCamelCase :int = 1_00 , __UpperCamelCase :Union[str, Any] = None , __UpperCamelCase :Optional[Any] = None , __UpperCamelCase :List[str] = True , ): if audio_length_in_s is None: A = self.unet.config.sample_size / self.unet.config.sample_rate A = audio_length_in_s * self.unet.config.sample_rate A = 2 ** len(self.unet.up_blocks ) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's bigger or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) A = int(A_ ) if sample_size % down_scale_factor != 0: A = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) A = int(A_ ) A = next(iter(self.unet.parameters() ) ).dtype A = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(A_ , A_ ) and len(A_ ) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(A_ )}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
) A = randn_tensor(A_ , generator=A_ , device=self.device , dtype=A_ ) # set step values self.scheduler.set_timesteps(A_ , device=audio.device ) A = self.scheduler.timesteps.to(A_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output A = self.unet(A_ , A_ ).sample # 2. compute previous image: x_t -> t_t-1 A = self.scheduler.step(A_ , A_ , A_ ).prev_sample A = audio.clamp(-1 , 1 ).float().cpu().numpy() A = audio[:, :, :original_sample_size] if not return_dict: return (audio,) return AudioPipelineOutput(audios=A_ )
292
import pickle

import numpy as np


class CNN:
    """A tiny from-scratch CNN: one convolution layer, one pooling layer, and a
    two-layer fully-connected back-propagation network.

    NOTE(review): identifiers were machine-mangled (all methods shared one
    name, duplicate parameters, locals bound to throwaway names). Names are
    restored from the code's own self-references (``self.sig``,
    ``self.pooling``, ``CNN(...)`` in ``read_model``, pickle dict keys, ...).
    ``self.Expand_Mat`` was called but never defined; it now resolves to
    ``_expand_mat``. ``np.mat`` (removed in NumPy 2.0) was replaced by the
    equivalent ``np.asmatrix``.
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, kernel_count, conv_step]
        :param size_p1: pooling window size
        :param bp_num1: units of flatten layer (input of the BP net)
        :param bp_num2: units of the hidden layer
        :param bp_num3: units of the output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # Random init in [-0.5, 0.5) for weights and [-1, 1) for thresholds.
        self.w_conv1 = [
            np.asmatrix(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5)
            for i in range(self.conv1[1])
        ]
        self.wkj = np.asmatrix(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.asmatrix(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        """Pickle every learned parameter of the model to ``save_path``."""
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        """Rebuild a ``CNN`` instance from a file written by :meth:`save_model`."""
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301  # only load trusted model files

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Logistic sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round a prediction to 3 decimal places."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Slide the kernels over ``data`` and return (flattened slices, feature maps)."""
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)
        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            # was ``self.Expand_Mat`` (never defined) -- resolved to _expand_mat
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Pool every feature map with an average- or max-pooling window."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        """Flatten a list of matrices into a single 1-D array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        """Flatten one matrix into a (1, rows*cols) array."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Up-sample pooled gradients back to feature-map size and apply sigmoid'."""
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        """Train until ``n_repeat`` epochs or the MSE drops below ``error_accuracy``.

        Returns the final mean squared error. Plots the error curve when
        ``draw_e`` is truthy (original default was the ``bool`` type itself,
        which is truthy -- normalized to ``True`` with identical behavior).
        """
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train,
                    self.conv1,
                    self.w_conv1,
                    self.thre_conv1,
                    conv_step=self.step_conv1,
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print(' ----Teach ',data_teach)
                # print(' ----BP_output ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            # Lazy import so the model itself has no hard matplotlib dependency.
            from matplotlib import pyplot as plt

            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass over ``datas_test`` and return rounded outputs."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test,
                self.conv1,
                self.w_conv1,
                self.thre_conv1,
                conv_step=self.step_conv1,
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Return the (feature maps, pooled maps) of the first layer for ``data``."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test,
            self.conv1,
            self.w_conv1,
            self.thre_conv1,
            conv_step=self.step_conv1,
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


# Backward-compatible alias for the previous (mangled) class name.
A__ = CNN


if __name__ == "__main__":
    pass
52
0
"""Tests for the NLLB tokenizers (slow and fast).

NOTE(review): identifiers in this file were machine-mangled -- both test
classes were named ``A__`` (so the second shadowed the first and its tests
never ran), every method was named ``_lowerCAmelCase`` (only the last one per
class survived), and three module constants were all bound to ``__a`` while
the code reads ``EN_CODE``/``RO_CODE``. Names are restored from the bodies'
own references; the broken module path ``models.mam_aaa.modeling_mam_aaa``
is restored to ``models.m2m_100.modeling_m2m_100``.
"""
import shutil
import tempfile
import unittest

from transformers import (
    SPIECE_UNDERLINE,
    AddedToken,
    BatchEncoding,
    NllbTokenizer,
    NllbTokenizerFast,
    is_torch_available,
)
from transformers.testing_utils import (
    get_tests_dir,
    nested_simplify,
    require_sentencepiece,
    require_tokenizers,
    require_torch,
)

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145


@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_full_tokenizer(self):
        """Token, id and round-trip checks against the fixture vocabulary."""
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_save_pretrained(self):
        """Slow and fast tokenizers must save/reload interchangeably."""
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

    @require_torch
    def test_prepare_seq2seq_batch(self):
        """``prepare_seq2seq_batch`` must honor max_length / max_target_length."""
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)

    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass

    def test_special_tokens_initialization(self):
        """``additional_special_tokens`` must round-trip through slow and fast."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)


@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 256057,
            },
        )

    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
145
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    """Configuration class for a BART model.

    Stores the hyper-parameters that define the encoder/decoder architecture.
    Instantiating with no arguments yields a configuration similar to
    ``facebook/bart-large``.
    """

    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map generic attribute names onto BART-specific ones.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeqaSeqConfigWithPast):
    """ONNX export configuration for BART.

    Describes the model's input/output axes for the supported export tasks
    ("default"/"seq2seq-lm", "causal-lm", and classification/QA) and builds
    the dummy inputs — including `past_key_values` — used to trace the model.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With a cache the decoder consumes a single new token per step.
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            # For causal-lm / classification tasks reuse the single-stack output spec.
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build encoder+decoder dummy inputs (and optional past_key_values)."""
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build decoder-only dummy inputs (and optional past_key_values)."""
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Tokenize a dummy batch, pinning dynamic (-1) axes to fixed sizes."""
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Dispatch on the export task; the helpers share a common signature.
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        # NOTE(review): parents mutate `flattened_output` in place, hence no return.
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeqaSeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
52
0
"""Convert SwiftFormer checkpoints from the original repository to the HuggingFace format."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Download the standard COCO sanity-check image (two cats on a couch)."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five reference logits for a known SwiftFormer variant.

    Raises nothing for unknown names — returns ``None`` implicitly, which the
    caller's ``torch.allclose`` would then reject.
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Map original SwiftFormer state-dict keys to their HuggingFace names.

    Returns a list of ``(old_key, new_key)`` pairs.
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                # numeric sub-index -> a block inside a stage
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Convert one SwiftFormer checkpoint, verify its logits, and save it.

    :param swiftformer_name: one of swiftformer_{xs,s,l1,l3}
    :param pytorch_dump_folder_path: output directory for the converted model
    :param original_ckpt: local path or https URL of the original checkpoint
    """
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    # NOTE(review): if `original_ckpt` is falsy, `checkpoint` below is unbound and
    # this raises UnboundLocalError — preserved from the original flow; confirm
    # whether --original_ckpt should be required.
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
150
"""Elementary number-theory helpers: primality, sieving, factorization,
gcd/lcm, Goldbach pairs, divisors, Fibonacci.

All functions validate their arguments with assertions, following the
original file's convention.
"""

from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"

    return status


def sieve_er(n: int) -> list:
    """Return all primes in [2, n] via the sieve of Eratosthenes."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    # actual sieve of erathostenes: zero out every proper multiple
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def get_prime_numbers(n: int) -> list:
    """Return all primes in [2, n] by direct primality testing."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def prime_factorization(number: int) -> list:
    """Return the prime factors of ``number`` in ascending order.

    For 0, 1, or a prime the list contains just ``number`` itself.
    """
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)

    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                # floor division keeps the quotient an exact int; the original
                # '/=' converted it to float and risked precision loss on big ints.
                quotient //= factor
            else:
                factor += 1

    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"

    return ans


def greatest_prime_factor(number: int) -> int:
    """Return the largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = max(prime_factorization(number))

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def smallest_prime_factor(number: int) -> int:
    """Return the smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = min(prime_factorization(number))

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"

    return ans


def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"

    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"

    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two primes summing to the even ``number`` (> 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"

    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0

    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"

    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple, built from the two prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)

    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)

                for _ in range(max(count1, count2)):
                    ans *= n

            else:
                count1 = prime_fac_1.count(n)

                for _ in range(count1):
                    ans *= n

            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)

            for _ in range(count2):
                ans *= n

            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"

    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime, zero-indexed: get_prime(0) == 2."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number

        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"

    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Return the primes strictly between two primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)

        number += 1

        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """Return all divisors of n (including 1 and n), ascending."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"

    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Return the fraction reduced to lowest terms as (numerator, denominator)."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! (with 0! == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Return the n-th value of the sequence 1, 1, 2, 3, 5, ... (fib(0) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
52
0
"""Tests for the LDM text-to-image pipeline (fast CPU tests plus slow/nightly
GPU regression tests against reference slices/images)."""

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): the obfuscated source lost this attribute's name; restored as
    # the xformers opt-out flag — confirm against PipelineTesterMixin.
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build tiny randomly-initialized components for a fast CPU pipeline."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # mps has no device-bound Generator; fall back to the global seed.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        # Fixed latents from numpy so the output is reproducible across runs.
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
324
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of
# the repo with the command:
#   python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes whose docstrings are allowed to lack a checkpoint reference.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches its name,
    or None if the docstring of `config_class` mentions no valid checkpoint."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every (non-ignored, non-deprecated) config class
    whose docstring contains no valid checkpoint reference."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
52
0
def remove_duplicates(key: str) -> str:
    """Return `key` with duplicate alphabetic characters removed.

    Spaces are always kept; non-alphabetic characters are dropped.
    """
    key_no_dups = ""
    for ch in key:
        # keep spaces; keep each letter only the first time it appears
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a substitution map: keyword letters first, then the rest of the
    alphabet in order, skipping letters already used by the keyword."""
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher `message` (upper-cased) through `cipher_map`; unmapped
    characters (digits, punctuation, spaces) pass through unchanged."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher `message` by inverting `cipher_map`."""
    # Reverse the cipher map so cipher letters look up their plain letters
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive driver: prompt for a message, keyword, and direction."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
193
from __future__ import annotations

from functools import lru_cache
from math import ceil

# Upper bound (exclusive) for the primes considered and for the search range.
NUM_PRIMES = 100

# Sieve of Eratosthenes over the odd numbers below NUM_PRIMES.
primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int

for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
    if prime not in primes:
        continue
    primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))


@lru_cache(maxsize=100)
def partition(number_to_partition: int) -> set[int]:
    """Return a set of products, one per way of writing `number_to_partition`
    as an (unordered) sum of primes.

    Each partition is encoded as the product of its primes, so the size of the
    returned set is the number of distinct prime partitions.
    """
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        # the empty partition: a single way, encoded by the multiplicative identity
        return {1}

    ret: set[int] = set()
    prime: int
    sub: int

    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)

    return ret


def solution(number_unique_partitions: int = 5000) -> int | None:
    """Return the smallest integer that can be written as a sum of primes in
    more than `number_unique_partitions` ways (Project Euler problem 77)."""
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None


if __name__ == "__main__":
    print(f"{solution() = }")
52
0
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers A : str = """3""" print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
305
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, recursively.

    No '0b' prefix is added; that is the caller's job (see `main`).
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    # most-significant digits first, current remainder appended last
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert a (possibly signed) integer string to '0b'-prefixed binary.

    Raises:
        ValueError: if the input is empty or not an integer.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"


if __name__ == "__main__":
    from doctest import testmod

    testmod()
52
0
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin

# NOTE(review): obfuscation collapsed assignment targets to `lowercase__`,
# class names to `_UpperCAmelCase`, base classes to `__snake_case` and call
# arguments to `A_`, so names read in the bodies below (e.g. `inner_dim`,
# `processors`, `PriorTransformerOutput`) are never actually bound here.
# Tokens are kept byte-identical; only comments/docstrings were added.


@dataclass
class _UpperCAmelCase ( __snake_case ):
    """Output dataclass — presumably `PriorTransformerOutput` with a single
    `predicted_image_embedding` tensor field (annotation mangled to `42`);
    confirm against upstream diffusers."""

    # mangled field declaration (originally an annotated torch.FloatTensor)
    lowercase__ = 42


class _UpperCAmelCase ( __snake_case ,__snake_case ):
    """Prior transformer — maps CLIP text embeddings + noisy image embeddings
    to predicted CLIP image embeddings (unCLIP/Karlo-style prior)."""

    @register_to_config
    def __init__( self : List[Any], lowerCamelCase : List[Any] = 32, lowerCamelCase : str = 64, lowerCamelCase : str = 20, lowerCamelCase : Any = 768, lowerCamelCase : List[Any]=77, lowerCamelCase : Dict=4, lowerCamelCase : Any = 0.0, lowerCamelCase : List[Any] = "silu", lowerCamelCase : List[Any] = None, lowerCamelCase : Union[str, Any] = None, lowerCamelCase : int = "linear", lowerCamelCase : Optional[int] = "prd", lowerCamelCase : Optional[Any] = None, lowerCamelCase : Any = None, lowerCamelCase : List[Any] = None, ):
        '''Build projections, positional/"prd" embeddings, the transformer
        stack, and the causal attention mask buffer.'''
        super().__init__()
        lowercase__ = num_attention_heads
        lowercase__ = attention_head_dim
        lowercase__ = num_attention_heads * attention_head_dim
        lowercase__ = additional_embeddings
        lowercase__ = time_embed_dim or inner_dim
        lowercase__ = embedding_proj_dim or embedding_dim
        lowercase__ = clip_embed_dim or embedding_dim
        lowercase__ = Timesteps(A_, A_, 0 )
        lowercase__ = TimestepEmbedding(A_, A_, out_dim=A_, act_fn=A_ )
        lowercase__ = nn.Linear(A_, A_ )
        if embedding_proj_norm_type is None:
            lowercase__ = None
        elif embedding_proj_norm_type == "layer":
            lowercase__ = nn.LayerNorm(A_ )
        else:
            raise ValueError(F"""unsupported embedding_proj_norm_type: {embedding_proj_norm_type}""" )
        lowercase__ = nn.Linear(A_, A_ )
        if encoder_hid_proj_type is None:
            lowercase__ = None
        elif encoder_hid_proj_type == "linear":
            lowercase__ = nn.Linear(A_, A_ )
        else:
            raise ValueError(F"""unsupported encoder_hid_proj_type: {encoder_hid_proj_type}""" )
        # learned positional embedding over all token slots
        lowercase__ = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, A_ ) )
        if added_emb_type == "prd":
            # extra learned "prd" token appended to the sequence
            lowercase__ = nn.Parameter(torch.zeros(1, 1, A_ ) )
        elif added_emb_type is None:
            lowercase__ = None
        else:
            raise ValueError(
                F"""`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.""" )
        lowercase__ = nn.ModuleList(
            [
                BasicTransformerBlock(
                    A_, A_, A_, dropout=A_, activation_fn='''gelu''', attention_bias=A_, )
                for d in range(A_ )
            ] )
        if norm_in_type == "layer":
            lowercase__ = nn.LayerNorm(A_ )
        elif norm_in_type is None:
            lowercase__ = None
        else:
            raise ValueError(F"""Unsupported norm_in_type: {norm_in_type}.""" )
        lowercase__ = nn.LayerNorm(A_ )
        lowercase__ = nn.Linear(A_, A_ )
        # strictly-upper-triangular -10000 mask => causal self-attention
        lowercase__ = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10_000.0 )
        causal_attention_mask.triu_(1 )
        lowercase__ = causal_attention_mask[None, ...]
        self.register_buffer('''causal_attention_mask''', A_, persistent=A_ )
        # statistics used by post-processing to un-normalise CLIP latents
        lowercase__ = nn.Parameter(torch.zeros(1, A_ ) )
        lowercase__ = nn.Parameter(torch.zeros(1, A_ ) )

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def lowercase__ ( self : List[Any] ):
        '''Return a flat dict of all attention processors, keyed by their
        dotted module path (recursive walk over child modules).'''
        lowercase__ = {}

        def fn_recursive_add_processors(lowerCamelCase : str, lowerCamelCase : int, lowerCamelCase : Dict ):
            if hasattr(A_, '''set_processor''' ):
                lowercase__ = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"""{name}.{sub_name}""", A_, A_ )
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(A_, A_, A_ )
        return processors

    def lowercase__ ( self : str, lowerCamelCase : List[Any] ):
        '''Set attention processors: either one shared processor instance or a
        dict with one entry per attention layer (counts must match).'''
        lowercase__ = len(self.attn_processors.keys() )
        if isinstance(A_, A_ ) and len(A_ ) != count:
            raise ValueError(
                F"""A dict of processors was passed, but the number of processors {len(A_ )} does not match the"""
                F""" number of attention layers: {count}. Please make sure to pass {count} processor classes.""" )

        def fn_recursive_attn_processor(lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : int ):
            if hasattr(A_, '''set_processor''' ):
                if not isinstance(A_, A_ ):
                    module.set_processor(A_ )
                else:
                    # dict case: pop the processor registered for this exact path
                    module.set_processor(processor.pop(F"""{name}.processor""" ) )
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"""{name}.{sub_name}""", A_, A_ )

        for name, module in self.named_children():
            fn_recursive_attn_processor(A_, A_, A_ )

    def lowercase__ ( self : Tuple ):
        '''Reset every attention layer to the default AttnProcessor.'''
        self.set_attn_processor(AttnProcessor() )

    def lowercase__ ( self : int, lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : List[Any], lowerCamelCase : str = None, lowerCamelCase : int = None, lowerCamelCase : Any = True, ):
        '''Forward pass: embed timestep + projections, assemble the token
        sequence, run the transformer stack, and project back to CLIP space.'''
        lowercase__ = hidden_states.shape[0]
        lowercase__ = timestep
        if not torch.is_tensor(A_ ):
            lowercase__ = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device )
        elif torch.is_tensor(A_ ) and len(timesteps.shape ) == 0:
            lowercase__ = timesteps[None].to(hidden_states.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowercase__ = timesteps * torch.ones(A_, dtype=timesteps.dtype, device=timesteps.device )
        lowercase__ = self.time_proj(A_ )
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        lowercase__ = timesteps_projected.to(dtype=self.dtype )
        lowercase__ = self.time_embedding(A_ )
        if self.embedding_proj_norm is not None:
            lowercase__ = self.embedding_proj_norm(A_ )
        lowercase__ = self.embedding_proj(A_ )
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            lowercase__ = self.encoder_hidden_states_proj(A_ )
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' )
        lowercase__ = self.proj_in(A_ )
        lowercase__ = self.positional_embedding.to(hidden_states.dtype )
        lowercase__ = []
        lowercase__ = 0
        if encoder_hidden_states is not None:
            additional_embeds.append(A_ )
            additional_embeddings_len += encoder_hidden_states.shape[1]
        # promote rank-2 inputs to (batch, 1, dim) token form
        if len(proj_embeddings.shape ) == 2:
            lowercase__ = proj_embeddings[:, None, :]
        if len(hidden_states.shape ) == 2:
            lowercase__ = hidden_states[:, None, :]
        lowercase__ = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]
        if self.prd_embedding is not None:
            lowercase__ = self.prd_embedding.to(hidden_states.dtype ).expand(A_, -1, -1 )
            additional_embeds.append(A_ )
        lowercase__ = torch.cat(
            A_, dim=1, )
        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        lowercase__ = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            lowercase__ = F.pad(
                A_, (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ), value=0.0, )
        lowercase__ = hidden_states + positional_embeddings
        if attention_mask is not None:
            # convert {0,1} mask to additive -10000 bias, pad for extra tokens,
            # combine with the causal buffer, then expand per attention head
            lowercase__ = (1 - attention_mask.to(hidden_states.dtype )) * -10_000.0
            lowercase__ = F.pad(A_, (0, self.additional_embeddings), value=0.0 )
            lowercase__ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
            lowercase__ = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0 )
        if self.norm_in is not None:
            lowercase__ = self.norm_in(A_ )
        for block in self.transformer_blocks:
            lowercase__ = block(A_, attention_mask=A_ )
        lowercase__ = self.norm_out(A_ )
        # with a prd token, the prediction is read from the last position;
        # otherwise from everything after the additional embeddings
        if self.prd_embedding is not None:
            lowercase__ = hidden_states[:, -1]
        else:
            lowercase__ = hidden_states[:, additional_embeddings_len:]
        lowercase__ = self.proj_to_clip_embeddings(A_ )
        if not return_dict:
            return (predicted_image_embedding,)
        return PriorTransformerOutput(predicted_image_embedding=A_ )

    def lowercase__ ( self : Optional[Any], lowerCamelCase : int ):
        '''Un-normalise prior latents using the stored CLIP mean/std.'''
        lowercase__ = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
207
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST

# NOTE(review): obfuscation renamed classes to `A__`, parameters to `A_` and
# assignment targets to `UpperCamelCase`, so names read below (`parent`,
# `batch_size`, `LiltModelTester`, ...) are never actually bound here. Tokens
# are kept byte-identical; only comments/docstrings were added.


class A__ :
    # Model tester: builds small random LiLT configs/inputs for the common tests.
    def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ):
        '''Store tiny-model hyperparameters used by the shared test harness.'''
        UpperCamelCase : Union[str, Any] = parent
        UpperCamelCase : List[Any] = batch_size
        UpperCamelCase : Dict = seq_length
        UpperCamelCase : Tuple = is_training
        UpperCamelCase : Union[str, Any] = use_input_mask
        UpperCamelCase : Tuple = use_token_type_ids
        UpperCamelCase : Optional[Any] = use_labels
        UpperCamelCase : str = vocab_size
        UpperCamelCase : Optional[int] = hidden_size
        UpperCamelCase : Any = num_hidden_layers
        UpperCamelCase : Optional[Any] = num_attention_heads
        UpperCamelCase : Optional[Any] = intermediate_size
        UpperCamelCase : Optional[Any] = hidden_act
        UpperCamelCase : Union[str, Any] = hidden_dropout_prob
        UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob
        UpperCamelCase : List[Any] = max_position_embeddings
        UpperCamelCase : str = type_vocab_size
        UpperCamelCase : Optional[int] = type_sequence_label_size
        UpperCamelCase : Dict = initializer_range
        UpperCamelCase : int = num_labels
        UpperCamelCase : Optional[int] = scope
        UpperCamelCase : int = range_bbox

    def __UpperCamelCase( self ):
        '''Create random input_ids, legal bounding boxes, masks and labels.'''
        UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal (x0 <= x1, y0 <= y1) by swapping bad pairs
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    UpperCamelCase : Union[str, Any] = bbox[i, j, 3]
                    UpperCamelCase : int = bbox[i, j, 1]
                    UpperCamelCase : int = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    UpperCamelCase : List[str] = bbox[i, j, 2]
                    UpperCamelCase : Optional[int] = bbox[i, j, 0]
                    UpperCamelCase : Optional[Any] = t
        UpperCamelCase : Dict = None
        if self.use_input_mask:
            UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        UpperCamelCase : str = None
        if self.use_token_type_ids:
            UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCamelCase : Dict = None
        UpperCamelCase : int = None
        if self.use_labels:
            UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        UpperCamelCase : List[Any] = self.get_config()
        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def __UpperCamelCase( self ):
        '''Build the LiltConfig for the tiny test model.'''
        return LiltConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
        '''Run LiltModel with decreasing sets of optional inputs and check output shapes.'''
        UpperCamelCase : Any = LiltModel(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ )
        UpperCamelCase : Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ )
        UpperCamelCase : Any = model(A_ , bbox=A_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
        '''Check LiltForTokenClassification logits shape with labels supplied.'''
        UpperCamelCase : Any = self.num_labels
        UpperCamelCase : Dict = LiltForTokenClassification(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : Dict = model(
            A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ):
        '''Check LiltForQuestionAnswering start/end logits shapes.'''
        UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ )
        model.to(A_ )
        model.eval()
        UpperCamelCase : List[str] = model(
            A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __UpperCamelCase( self ):
        '''Repackage prepare_config_and_inputs into the kwargs dict the common tests expect.'''
        UpperCamelCase : Any = self.prepare_config_and_inputs()
        ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Tuple = config_and_inputs
        UpperCamelCase : Tuple = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
    # Common-suite test class (mixin bases mangled to `__snake_case`).
    _UpperCAmelCase :Union[str, Any] = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    _UpperCAmelCase :Optional[Any] = (
        {
            'feature-extraction': LiltModel,
            'question-answering': LiltForQuestionAnswering,
            'text-classification': LiltForSequenceClassification,
            'token-classification': LiltForTokenClassification,
            'zero-shot': LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase :Dict = False
    _UpperCAmelCase :Union[str, Any] = False

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ):
        '''Pipeline-test filter hook: returning True skips nothing extra.'''
        return True

    def __UpperCamelCase( self ):
        '''Set up the model tester and config tester (names mangled; see module note).'''
        UpperCamelCase : Optional[int] = LiltModelTester(self )
        UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 )

    def __UpperCamelCase( self ):
        '''Exercise the shared LiltConfig sanity checks.'''
        self.config_tester.run_common_tests()

    def __UpperCamelCase( self ):
        '''Shape checks for the base model.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A_ )

    def __UpperCamelCase( self ):
        '''Re-run the base-model check under each position-embedding variant.'''
        UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(*A_ )

    def __UpperCamelCase( self ):
        '''Shape checks for token classification.'''
        UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A_ )

    def __UpperCamelCase( self ):
        '''Shape checks for question answering.'''
        UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A_ )

    @slow
    def __UpperCamelCase( self ):
        '''Smoke-test loading the first published pretrained checkpoint.'''
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase : Dict = LiltModel.from_pretrained(A_ )
            self.assertIsNotNone(A_ )


@require_torch
@slow
class A__ ( unittest.TestCase ):
    # Integration test against hard-coded reference activations.
    def __UpperCamelCase( self ):
        '''Forward a 2-token input through the released checkpoint and compare
        the first hidden-state columns against stored reference values.'''
        UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(A_ )
        UpperCamelCase : Tuple = torch.tensor([[1, 2]] , device=A_ )
        UpperCamelCase : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ )
        # forward pass
        with torch.no_grad():
            UpperCamelCase : Optional[int] = model(input_ids=A_ , bbox=A_ )
        UpperCamelCase : List[str] = torch.Size([1, 2, 768] )
        UpperCamelCase : Any = torch.tensor(
            [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=A_ , )
        self.assertTrue(outputs.last_hidden_state.shape , A_ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
52
0
"""simple docstring""" import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def __UpperCAmelCase ( lowercase ): # picklable for multiprocessing """simple docstring""" return x.sum() def __UpperCAmelCase ( lowercase ): # picklable for multiprocessing """simple docstring""" return i + 1 @dataclass class a : _snake_case : int _snake_case : str class a ( __snake_case ): def lowerCAmelCase_ ( self : int ): _UpperCAmelCase = {} _UpperCAmelCase = [] _UpperCAmelCase = 1 _UpperCAmelCase = [1, 2] _UpperCAmelCase = {"a": 1, "b": 2} _UpperCAmelCase = {"a": [1, 2], "b": [3, 4]} _UpperCAmelCase = {"a": {"1": 1}, "b": 2} _UpperCAmelCase = {"a": 1, "b": 2, "c": 3, "d": 4} _UpperCAmelCase = {} _UpperCAmelCase = [] _UpperCAmelCase = 2 _UpperCAmelCase = [2, 3] _UpperCAmelCase = {"a": 2, "b": 3} _UpperCAmelCase = {"a": [2, 3], "b": [4, 5]} _UpperCAmelCase = {"a": {"1": 2}, "b": 3} _UpperCAmelCase = {"a": 2, "b": 3, "c": 4, "d": 5} self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ ) , A_ ) _UpperCAmelCase = 2 self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) 
self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) self.assertEqual(map_nested(A_ , A_ , num_proc=A_ ) , A_ ) _UpperCAmelCase = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )} _UpperCAmelCase = {"a": 2, "b": 0, "c": 2} _UpperCAmelCase = { "a": np.eye(2 ).astype(A_ ), "b": np.zeros(3 ).astype(A_ ), "c": np.ones(2 ).astype(A_ ), } self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ) , A_ ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A_ , A_ , map_numpy=A_ , num_proc=A_ ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(A_ ): # can't pickle a local lambda map_nested(lambda __lowerCAmelCase : x + 1 , A_ , num_proc=A_ ) def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = {"a": 1, "b": 2} _UpperCAmelCase = {"a": 3, "b": 4} _UpperCAmelCase = {"a": 5, "b": 6} _UpperCAmelCase = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(A_ , A_ , A_ ) ) , A_ ) def lowerCAmelCase_ ( self : Any ): class a : _snake_case : int = 'bar' _UpperCAmelCase = Foo() self.assertEqual(foo.my_attr , """bar""" ) with temporary_assignment(A_ , """my_attr""" , """BAR""" ): self.assertEqual(foo.my_attr , """BAR""" ) self.assertEqual(foo.my_attr , """bar""" ) @pytest.mark.parametrize( """iterable_length, num_proc, expected_num_proc""" ,[ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ,lowercase ): """simple docstring""" with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch( """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool: _UpperCAmelCase = {f'''{i}''': i for i 
in range(_lowerCAmelCase )} _UpperCAmelCase = map_nested(lambda lowercase : x + 10 ,_lowerCAmelCase ,num_proc=_lowerCAmelCase ,parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class a ( __snake_case ): @require_tf def lowerCAmelCase_ ( self : Optional[int] ): import tensorflow as tf from tensorflow.keras import layers _UpperCAmelCase = layers.Dense(2 ) def gen_random_output(): _UpperCAmelCase = tf.random.uniform((1, 3) ) return model(A_ ).numpy() with temp_seed(42 , set_tensorflow=A_ ): _UpperCAmelCase = gen_random_output() with temp_seed(42 , set_tensorflow=A_ ): _UpperCAmelCase = gen_random_output() _UpperCAmelCase = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def lowerCAmelCase_ ( self : Optional[int] ): import torch def gen_random_output(): _UpperCAmelCase = torch.nn.Linear(3 , 2 ) _UpperCAmelCase = torch.rand(1 , 3 ) return model(A_ ).detach().numpy() with temp_seed(42 , set_pytorch=A_ ): _UpperCAmelCase = gen_random_output() with temp_seed(42 , set_pytorch=A_ ): _UpperCAmelCase = gen_random_output() _UpperCAmelCase = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def lowerCAmelCase_ ( self : Optional[Any] ): def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(42 ): _UpperCAmelCase = gen_random_output() with temp_seed(42 ): _UpperCAmelCase = gen_random_output() _UpperCAmelCase = gen_random_output() np.testing.assert_equal(A_ , A_ ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize("""input_data""" ,[{}] ) def __UpperCAmelCase ( lowercase ): """simple docstring""" _UpperCAmelCase = NestedDataStructure(_lowerCAmelCase ).data assert output_data == 
input_data @pytest.mark.parametrize( """data, expected_output""" ,[ ({}, []), ([], []), ("""foo""", ["""foo"""]), (["""foo""", """bar"""], ["""foo""", """bar"""]), ([["""foo""", """bar"""]], ["""foo""", """bar"""]), ([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]), ([[["""foo"""], """bar"""]], ["""foo""", """bar"""]), ({"""a""": 1, """b""": 2}, [1, 2]), ({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]), ({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]), ({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]), ({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]), ({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]), ({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]), ] ,) def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" _UpperCAmelCase = NestedDataStructure(_lowerCAmelCase ).flatten() assert output == expected_output def __UpperCAmelCase ( ): """simple docstring""" _UpperCAmelCase = A(x=1 ,y="""foobar""" ) _UpperCAmelCase = {"x": 1, "y": "foobar"} assert asdict(_lowerCAmelCase ) == expected_output _UpperCAmelCase = {"a": {"b": A(x=10 ,y="""foo""" )}, "c": [A(x=20 ,y="""bar""" )]} _UpperCAmelCase = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]} assert asdict(_lowerCAmelCase ) == expected_output with pytest.raises(_lowerCAmelCase ): asdict([1, A(x=10 ,y="""foo""" )] ) def __UpperCAmelCase ( lowercase ): """simple docstring""" return text.split() def __UpperCAmelCase ( lowercase ): """simple docstring""" yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def __UpperCAmelCase ( ): """simple docstring""" with Pool(2 ) as pool: _UpperCAmelCase = list(iflatmap_unordered(_lowerCAmelCase ,_split_text ,kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) ) assert 
out.count("""hello""" ) == 10 assert out.count("""there""" ) == 10 assert len(_lowerCAmelCase ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _UpperCAmelCase = list(iflatmap_unordered(_lowerCAmelCase ,_split_text ,kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) ) assert out.count("""hello""" ) == 10 assert out.count("""there""" ) == 10 assert len(_lowerCAmelCase ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _UpperCAmelCase = [] for yield_time, content in iflatmap_unordered( _lowerCAmelCase ,_aseconds_generator_of_aitems_with_timing ,kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(_lowerCAmelCase ) assert out.count("""a""" ) == 2 assert out.count("""b""" ) == 2 assert len(_lowerCAmelCase ) == 4
289
# ---------------------------------------------------------------------------
# NOTE(review): machine-obfuscated copy of datasets' tests/test_search.py.
# Obfuscated identifiers (`A__`, `__snake_case`, `UpperCamelCase`, `A_`,
# `np.floataa`, ...) and syntactically invalid annotated tuple targets
# (`UpperCamelCase, UpperCamelCase : Tuple = ...`) are left byte-identical;
# only comments/docstrings were added.
# ---------------------------------------------------------------------------
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss

# pytest marker used to tag these as integration tests
__lowerCamelCase : Union[str, Any] = pytest.mark.integration


@require_faiss
class A__(__snake_case):  # NOTE(review): base name is an obfuscation artifact -- presumably TestCase
    def __UpperCamelCase(self):
        """Build a 30-row dummy dataset with a single "filename" column."""
        UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_) for x in np.arange(30).tolist()]})
        return dset

    def __UpperCamelCase(self):
        """add_faiss_index over a mapped vector column finds the nearest row."""
        import faiss

        UpperCamelCase : Dataset = self._create_dummy_dataset()
        UpperCamelCase : List[Any] = dset.map(
            lambda A_, A_: {"vecs": i * np.ones(5, dtype=np.floataa)}, with_indices=A_, keep_in_memory=A_
        )
        UpperCamelCase : List[str] = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        UpperCamelCase, UpperCamelCase : Tuple = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.floataa))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def __UpperCamelCase(self):
        """add_faiss_index_from_external_arrays indexes raw numpy vectors."""
        import faiss

        UpperCamelCase : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        UpperCamelCase, UpperCamelCase : int = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.floataa))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def __UpperCamelCase(self):
        """A dataset FAISS index survives a save/load round-trip on disk."""
        import faiss

        UpperCamelCase : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=A_) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        UpperCamelCase, UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.floataa))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def __UpperCamelCase(self):
        """Querying a dropped index raises (presumably MissingIndex)."""
        UpperCamelCase : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(A_, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.floataa)))

    def __UpperCamelCase(self):
        """add_elasticsearch_index works against a fully mocked ES client."""
        from elasticsearch import Elasticsearch

        UpperCamelCase : Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            UpperCamelCase : List[str] = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            # mocked search response: best hit is row id 29
            UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            UpperCamelCase : Optional[Any] = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=A_)
            UpperCamelCase, UpperCamelCase : List[str] = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")


@require_faiss
class A__(__snake_case):  # FaissIndex unit tests
    def __UpperCamelCase(self):
        """search/search_batch return positive scores and the right neighbours."""
        import faiss

        UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.floataa))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.floataa))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        UpperCamelCase : Any = np.zeros(5, dtype=np.floataa)
        UpperCamelCase : Optional[Any] = 1
        UpperCamelCase, UpperCamelCase : Optional[Any] = index.search(A_)
        self.assertRaises(A_, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        UpperCamelCase : Optional[int] = np.eye(5, dtype=np.floataa)[::-1]
        UpperCamelCase, UpperCamelCase : Tuple = index.search_batch(A_)
        self.assertRaises(A_, index.search_batch, queries[0])
        UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores]
        UpperCamelCase : Tuple = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(A_), 0)
        self.assertListEqual([4, 3, 2, 1, 0], A_)

    def __UpperCamelCase(self):
        """FaissIndex honours the string factory and rejects factory+custom index."""
        import faiss

        UpperCamelCase : List[str] = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.floataa))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        UpperCamelCase : List[str] = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.floataa))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(A_):
            UpperCamelCase : List[str] = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def __UpperCamelCase(self):
        """A prebuilt faiss index can be wrapped via custom_index."""
        import faiss

        UpperCamelCase : Dict = faiss.IndexFlat(5)
        UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=A_)
        index.add_vectors(np.eye(5, dtype=np.floataa))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def __UpperCamelCase(self):
        """FaissIndex serialization round-trips through a temp file."""
        import faiss

        UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.floataa))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=A_) as tmp_file:
            index.save(tmp_file.name)
            UpperCamelCase : int = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        UpperCamelCase : str = np.zeros(5, dtype=np.floataa)
        UpperCamelCase : int = 1
        UpperCamelCase, UpperCamelCase : Dict = index.search(A_)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)


@require_faiss
def A_(_lowerCAmelCase):
    """FaissIndex save/load also works through an fsspec filesystem
    (the `mockfs` pytest fixture -- name lost to obfuscation)."""
    import faiss

    UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.floataa))

    UpperCamelCase : List[Any] = "index.faiss"
    UpperCamelCase : List[str] = f"""mock://{index_name}"""
    index.save(_lowerCAmelCase, storage_options=mockfs.storage_options)
    UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase, storage_options=mockfs.storage_options)

    UpperCamelCase : List[str] = np.zeros(5, dtype=np.floataa)
    UpperCamelCase : Optional[int] = 1
    UpperCamelCase, UpperCamelCase : List[str] = index.search(_lowerCAmelCase)
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class A__(__snake_case):
    def __UpperCamelCase(self):
        """ElasticSearchIndex add/search/search_batch with a mocked ES client."""
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            UpperCamelCase : List[str] = Elasticsearch()
            UpperCamelCase : Union[str, Any] = {"acknowledged": True}
            UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=A_)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            UpperCamelCase : str = "foo"
            UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            UpperCamelCase, UpperCamelCase : Tuple = index.search(A_)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            UpperCamelCase : Dict = "foo"
            UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            UpperCamelCase, UpperCamelCase : str = index.search(A_, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            UpperCamelCase : Dict = ["foo", "bar", "foobar"]
            UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            UpperCamelCase, UpperCamelCase : Optional[int] = index.search_batch(A_)
            UpperCamelCase : str = [scores[0] for scores in total_scores]
            UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(A_), 0)
            self.assertListEqual([1, 1, 1], A_)

            # batched queries with timeout
            UpperCamelCase : int = ["foo", "bar", "foobar"]
            UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            UpperCamelCase, UpperCamelCase : Union[str, Any] = index.search_batch(A_, request_timeout=30)
            UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores]
            UpperCamelCase : Dict = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(A_), 0)
            self.assertListEqual([1, 1, 1], A_)
52
0
"""Convert a sharded fairseq NLLB-MoE checkpoint to the Hugging Face format.

Loads each expert's fairseq shard plus the shared weights, renames the keys
to the NllbMoe naming scheme, and writes HF-style shard files together with a
weight-map index (WEIGHTS_INDEX_NAME).

Fix over the previous revision: the obfuscated copy defined three functions
all named ``lowercase__`` while the bodies and the ``__main__`` block called
``remove_ignore_keys_`` / ``rename_fairseq_keys`` / ``shard_on_the_fly`` and
read undefined locals (``_lowerCAmelCase`` etc.), so the script raised
NameError; coherent names are restored.
"""

import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping entries (in place) that have no HF equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    """Return a bias-free nn.Linear sharing its weight tensor with `emb`."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    """Return a new dict with fairseq parameter names mapped to NllbMoe names.

    Args:
        state_dict: fairseq-style state dict.
        expert_idx: expert index this shard belongs to, or None for the shared
            (non-expert) weights.
    """
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NOTE: upstream wrote `if "fc2" and "experts" not in key` (always-truthy
        # string literal).  `str.replace` is a no-op when ".fc2." is absent, so
        # adding the membership test preserves behaviour while stating the intent.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    """Convert the fairseq checkpoint into HF shard files plus an index.

    Args:
        switch_checkpoint_path: common prefix of the fairseq files
            (``<prefix>-rank-<i>.pt`` per expert and ``<prefix>-shared.pt``).
        dump_path: output directory (created if missing).
        num_experts: number of expert ranks to look for.
        dtype: target dtype name from the CLI (currently unused here; kept for
            interface compatibility -- TODO confirm intended use).
        weights_name: base name for the shard files.

    Returns:
        ``({weights_name: keys}, None)`` when everything fit in one file,
        otherwise ``(metadata, index)`` where `index` is the weight map that
        was written to WEIGHTS_INDEX_NAME.
    """
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    # One fairseq file per expert rank; convert each into its own HF shard.
    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block: the shared (non-expert) weights.
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    # NOTE(review): the obfuscated copy dropped the assignment target here;
    # restored as the tied shared embedding per the upstream conversion script.
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        # Rename the placeholder "???" files now that the shard count is known.
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
251
def A_ ( _lowerCAmelCase = 50 ) -> int: UpperCamelCase : List[Any] = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f"""{solution() = }""")
52
0
'''simple docstring''' import qiskit def __lowerCamelCase ( _lowercase , _lowercase ) -> qiskit.result.counts.Counts: UpperCAmelCase : List[str] = qiskit.Aer.get_backend("""aer_simulator""" ) # Create a Quantum Circuit acting on the q register UpperCAmelCase : List[Any] = qiskit.QuantumCircuit(_lowerCAmelCase , _lowerCAmelCase ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator UpperCAmelCase : Optional[int] = qiskit.execute(_lowerCAmelCase , _lowerCAmelCase , shots=1_0_0_0 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(_lowerCAmelCase ) if __name__ == "__main__": a : Optional[Any] = single_qubit_measure(2, 2) print(F'''Total count for various states are: {counts}''')
265
def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : List[Any] = "" for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def A_ ( _lowerCAmelCase ) -> dict[str, str]: UpperCamelCase : Optional[Any] = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key UpperCamelCase : Tuple = remove_duplicates(key.upper() ) UpperCamelCase : int = len(_lowerCAmelCase ) # First fill cipher with key characters UpperCamelCase : int = {alphabet[i]: char for i, char in enumerate(_lowerCAmelCase )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(_lowerCAmelCase ) , 26 ): UpperCamelCase : Optional[Any] = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 UpperCamelCase : List[str] = alphabet[i - offset] UpperCamelCase : List[Any] = char return cipher_alphabet def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: return "".join(cipher_map.get(_lowerCAmelCase , _lowerCAmelCase ) for ch in message.upper() ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Union[str, Any] = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(_lowerCAmelCase , _lowerCAmelCase ) for ch in message.upper() ) def A_ ( ) -> None: UpperCamelCase : int = input("Enter message to encode or decode: " ).strip() UpperCamelCase : str = input("Enter keyword: " ).strip() UpperCamelCase : Union[str, Any] = input("Encipher or decipher? E/D:" ).strip()[0].lower() try: UpperCamelCase : List[str] = {"e": encipher, "d": decipher}[option] except KeyError: raise KeyError("invalid input option" ) UpperCamelCase : str = create_cipher_map(_lowerCAmelCase ) print(func(_lowerCAmelCase , _lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
52
0
"""Tests for the OwlViT processor (CLIP tokenizer + OwlViT image processor).

NOTE(review): machine-obfuscated copy of a transformers test file.  Obfuscated
identifiers (`_UpperCamelCase`, `A_`, `np.uinta`, colliding method names) are
left byte-identical; only comments/docstrings were added or improved.
"""

import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available

if is_vision_available():
    from PIL import Image

    from transformers import OwlViTImageProcessor, OwlViTProcessor


@require_vision
class UpperCAmelCase_(unittest.TestCase):
    def _UpperCamelCase(self : Tuple) -> Tuple:
        """setUp: write a tiny CLIP vocab/merges pair and an image-processor
        config into a fresh temp dir used by the fixture getters below."""
        _UpperCamelCase = tempfile.mkdtemp()

        # fmt: off
        _UpperCamelCase = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        _UpperCamelCase = dict(zip(A_, range(len(A_))))
        _UpperCamelCase = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        _UpperCamelCase = {"unk_token": "<unk>"}

        _UpperCamelCase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        _UpperCamelCase = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''merges_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as fp:
            fp.write(json.dumps(A_) + '''\n''')
        with open(self.merges_file, '''w''', encoding='''utf-8''') as fp:
            fp.write('''\n'''.join(A_))

        _UpperCamelCase = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        _UpperCamelCase = os.path.join(self.tmpdirname, A_)
        with open(self.image_processor_file, '''w''', encoding='''utf-8''') as fp:
            json.dump(A_, A_)

    def _UpperCamelCase(self : Dict, **__UpperCamelCase : List[str]) -> Dict:
        """Slow CLIP tokenizer loaded from the temp dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token='''!''', **A_)

    def _UpperCamelCase(self : str, **__UpperCamelCase : Union[str, Any]) -> Optional[Any]:
        """Fast (Rust) CLIP tokenizer loaded from the temp dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token='''!''', **A_)

    def _UpperCamelCase(self : int, **__UpperCamelCase : List[Any]) -> List[str]:
        """OwlViT image processor loaded from the temp dir."""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **A_)

    def _UpperCamelCase(self : Optional[Any]) -> List[str]:
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname)

    def _UpperCamelCase(self : Tuple) -> int:
        """One random uint8 channels-first image converted to PIL."""
        _UpperCamelCase = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
        _UpperCamelCase = [Image.fromarray(np.moveaxis(A_, 0, -1)) for x in image_inputs]
        return image_inputs

    def _UpperCamelCase(self : Optional[int]) -> str:
        """save_pretrained/from_pretrained round-trips both slow and fast
        processors and keeps vocab + image-processor config identical."""
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = self.get_rust_tokenizer()
        _UpperCamelCase = self.get_image_processor()

        _UpperCamelCase = OwlViTProcessor(tokenizer=A_, image_processor=A_)
        processor_slow.save_pretrained(self.tmpdirname)
        _UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=A_)

        _UpperCamelCase = OwlViTProcessor(tokenizer=A_, image_processor=A_)
        processor_fast.save_pretrained(self.tmpdirname)
        _UpperCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, A_)
        self.assertIsInstance(processor_fast.tokenizer, A_)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, A_)
        self.assertIsInstance(processor_fast.image_processor, A_)

    def _UpperCamelCase(self : List[str]) -> Tuple:
        """from_pretrained honours overriding tokenizer/image-processor kwargs."""
        _UpperCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        _UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        _UpperCamelCase = self.get_image_processor(do_normalize=A_)

        _UpperCamelCase = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token='''(BOS)''', eos_token='''(EOS)''', do_normalize=A_
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, A_)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, A_)

    def _UpperCamelCase(self : Optional[int]) -> Optional[Any]:
        """processor(images=...) matches the bare image processor's output."""
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = OwlViTProcessor(tokenizer=A_, image_processor=A_)
        _UpperCamelCase = self.prepare_image_inputs()

        _UpperCamelCase = image_processor(A_, return_tensors='''np''')
        _UpperCamelCase = processor(images=A_, return_tensors='''np''')

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1E-2)

    def _UpperCamelCase(self : int) -> Optional[int]:
        """processor(text=...) matches the bare tokenizer's output."""
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = OwlViTProcessor(tokenizer=A_, image_processor=A_)
        _UpperCamelCase = "lower newer"
        _UpperCamelCase = processor(text=A_, return_tensors='''np''')
        _UpperCamelCase = tokenizer(A_, return_tensors='''np''')

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def _UpperCamelCase(self : Union[str, Any]) -> List[str]:
        """A text+image call yields input_ids, attention_mask and pixel_values."""
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = OwlViTProcessor(tokenizer=A_, image_processor=A_)
        _UpperCamelCase = "lower newer"
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = processor(text=A_, images=A_)

        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''attention_mask''', '''pixel_values'''])

        # test if it raises when no input is passed
        with pytest.raises(A_):
            processor()

    def _UpperCamelCase(self : Tuple) -> Tuple:
        """A flat list of text queries is padded to seq_length = 16."""
        _UpperCamelCase = "google/owlvit-base-patch32"
        _UpperCamelCase = OwlViTProcessor.from_pretrained(A_)
        _UpperCamelCase = ["cat", "nasa badge"]
        _UpperCamelCase = processor(text=A_)

        _UpperCamelCase = 16
        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''attention_mask'''])
        self.assertEqual(inputs['''input_ids'''].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(A_):
            processor()

    def _UpperCamelCase(self : Any) -> List[str]:
        """Nested text queries flatten to batch_size * num_max_text_queries rows."""
        _UpperCamelCase = "google/owlvit-base-patch32"
        _UpperCamelCase = OwlViTProcessor.from_pretrained(A_)
        _UpperCamelCase = [["cat", "nasa badge"], ["person"]]
        _UpperCamelCase = processor(text=A_)

        _UpperCamelCase = 16
        _UpperCamelCase = len(A_)
        _UpperCamelCase = max([len(A_) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''attention_mask'''])
        self.assertEqual(inputs['''input_ids'''].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(A_):
            processor()

    def _UpperCamelCase(self : Optional[int]) -> List[str]:
        """input_ids match precomputed CLIP token ids for both queries."""
        _UpperCamelCase = "google/owlvit-base-patch32"
        _UpperCamelCase = OwlViTProcessor.from_pretrained(A_)
        _UpperCamelCase = ["cat", "nasa badge"]
        _UpperCamelCase = processor(text=A_)

        _UpperCamelCase = 16
        _UpperCamelCase = inputs["input_ids"]
        # expected ids: <|startoftext|> tokens <|endoftext|> then zero padding
        _UpperCamelCase = [
            [4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ['''input_ids''', '''attention_mask'''])
        self.assertEqual(inputs['''input_ids'''].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def _UpperCamelCase(self : int) -> Optional[Any]:
        """images + query_images produce query_pixel_values and pixel_values."""
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = OwlViTProcessor(tokenizer=A_, image_processor=A_)
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = self.prepare_image_inputs()
        _UpperCamelCase = processor(images=A_, query_images=A_)

        self.assertListEqual(list(inputs.keys()), ['''query_pixel_values''', '''pixel_values'''])

        # test if it raises when no input is passed
        with pytest.raises(A_):
            processor()

    def _UpperCamelCase(self : List[str]) -> int:
        """batch_decode is forwarded to the underlying tokenizer."""
        _UpperCamelCase = self.get_image_processor()
        _UpperCamelCase = self.get_tokenizer()
        _UpperCamelCase = OwlViTProcessor(tokenizer=A_, image_processor=A_)
        _UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        _UpperCamelCase = processor.batch_decode(A_)
        _UpperCamelCase = tokenizer.batch_decode(A_)

        self.assertListEqual(A_, A_)
256
from sklearn.metrics import fa_score import datasets __lowerCamelCase : List[Any] = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """ __lowerCamelCase : List[Any] = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. 
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. 
])} """ __lowerCamelCase : str = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Sequence(datasets.Value("int32" ) ), "references": datasets.Sequence(datasets.Value("int32" ) ), } if self.config_name == "multilabel" else { "predictions": datasets.Value("int32" ), "references": datasets.Value("int32" ), } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , ) def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ): '''simple docstring''' UpperCamelCase : List[str] = fa_score( A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ ) return {"f1": float(A_ ) if score.size == 1 else score}
52
0
"""simple docstring""" import json import os import unittest from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( __snake_case , unittest.TestCase ): UpperCamelCase = OpenAIGPTTokenizer UpperCamelCase = OpenAIGPTTokenizerFast UpperCamelCase = True UpperCamelCase = False def lowerCamelCase ( self :int ): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt A = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] A = dict(zip(A_ , range(len(A_ ) ) ) ) A = ["#version: 0.2", "l o", "lo w", "e r</w>", ""] A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(A_ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(A_ ) ) def lowerCamelCase ( self :List[str] , __UpperCamelCase :int ): return "lower newer", "lower newer" def lowerCamelCase ( self :List[Any] ): A = OpenAIGPTTokenizer(self.vocab_file , self.merges_file ) A = "lower" A = ["low", "er</w>"] A = tokenizer.tokenize(A_ ) self.assertListEqual(A_ , A_ ) A = tokens + ["<unk>"] A = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , A_ ) def lowerCamelCase ( self :Dict , __UpperCamelCase :Optional[int]=15 ): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ): A = self.rust_tokenizer_class.from_pretrained(A_ , **A_ ) # Simple input A = "This is a simple input" A = ["This is a simple input 1", "This is a simple input 2"] A = 
("This is a simple input", "This is a pair") A = [ ("This is a simple input 1", "This is a simple input 2"), ("This is a simple pair 1", "This is a simple pair 2"), ] # Simple input tests self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding="max_length" ) # Simple input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding="max_length" ) # Simple input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding="max_length" , ) # Pair input self.assertRaises(A_ , tokenizer_r.encode , A_ , max_length=A_ , padding="max_length" ) # Pair input self.assertRaises(A_ , tokenizer_r.encode_plus , A_ , max_length=A_ , padding="max_length" ) # Pair input self.assertRaises( A_ , tokenizer_r.batch_encode_plus , A_ , max_length=A_ , padding="max_length" , ) def lowerCamelCase ( self :Optional[int] ): pass @require_ftfy @require_spacy @require_tokenizers class _UpperCAmelCase ( __snake_case ): pass
292
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = KandinskyInpaintPipeline _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase :Dict = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase :Optional[int] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :int = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , 
transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) UpperCamelCase : Optional[int] = MultilingualCLIP(A_ ) UpperCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_text_encoder UpperCamelCase : str = self.dummy_tokenizer UpperCamelCase : List[Any] = self.dummy_unet UpperCamelCase : Optional[Any] = 
self.dummy_movq UpperCamelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Optional[Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa ) UpperCamelCase : str = 0 if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**A_ ) UpperCamelCase : Tuple = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : List[Any] = output.images UpperCamelCase : List[Any] = pipe( **self.get_dummy_inputs(A_ ) , 
return_dict=A_ , )[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCamelCase : Union[str, Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) UpperCamelCase : str = 0 UpperCamelCase : List[Any] = "a hat" UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCamelCase : Optional[Any] = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase 
, UpperCamelCase : Optional[Any] = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : Dict = pipeline( A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCamelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_ , A_ )
52
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __a = logging.get_logger(__name__) __a = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class A__ ( __snake_case , __snake_case ): """simple docstring""" UpperCamelCase_ : Optional[int] = 'convnextv2' def __init__( self : Optional[Any] , lowerCAmelCase__ : List[str]=3 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Optional[int]=4 , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : Optional[Any]=None , lowerCAmelCase__ : Any="gelu" , lowerCAmelCase__ : Dict=0.02 , lowerCAmelCase__ : Optional[int]=1e-12 , lowerCAmelCase__ : Optional[Any]=0.0 , lowerCAmelCase__ : Dict=2_2_4 , lowerCAmelCase__ : Dict=None , lowerCAmelCase__ : List[str]=None , **lowerCAmelCase__ : Any , ) -> Any: """simple docstring""" super().__init__(**A_ ) _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Union[str, Any] = patch_size _UpperCAmelCase : Union[str, Any] = num_stages _UpperCAmelCase : List[Any] = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes _UpperCAmelCase : List[str] = [3, 3, 9, 3] if depths is None else depths _UpperCAmelCase : Dict = hidden_act _UpperCAmelCase : Union[str, Any] = initializer_range _UpperCAmelCase : Tuple = layer_norm_eps _UpperCAmelCase : str = drop_path_rate _UpperCAmelCase : List[str] = image_size _UpperCAmelCase : List[str] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] _UpperCAmelCase : str = get_aligned_output_features_output_indices( out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
145
class A__ : def __init__( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = set_counts UpperCamelCase : int = max(A_ ) UpperCamelCase : Optional[Any] = len(A_ ) UpperCamelCase : Union[str, Any] = [1] * num_sets UpperCamelCase : Union[str, Any] = list(range(A_ ) ) def __UpperCamelCase( self , A_ , A_ ): '''simple docstring''' UpperCamelCase : Any = self.get_parent(A_ ) UpperCamelCase : Optional[int] = self.get_parent(A_ ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] UpperCamelCase : int = 0 UpperCamelCase : Dict = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 UpperCamelCase : Optional[int] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] UpperCamelCase : Any = 0 UpperCamelCase : Optional[int] = src_parent UpperCamelCase : int = self.set_counts[src_parent] UpperCamelCase : Any = max(self.max_set , A_ ) return True def __UpperCamelCase( self , A_ ): '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set UpperCamelCase : Optional[int] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
52
0
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class lowerCAmelCase_ : """simple docstring""" def __init__( self , lowerCAmelCase ): """simple docstring""" snake_case = data snake_case = [0x6_7_4_5_2_3_0_1, 0xE_F_C_D_A_B_8_9, 0x9_8_B_A_D_C_F_E, 0x1_0_3_2_5_4_7_6, 0xC_3_D_2_E_1_F_0] @staticmethod def snake_case ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" return ((n << b) | (n >> (32 - b))) & 0xF_F_F_F_F_F_F_F def snake_case ( self ): """simple docstring""" snake_case = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64) snake_case = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) ) return padded_data def snake_case ( self ): """simple docstring""" return [ self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 ) ] def snake_case ( self , lowerCAmelCase ): """simple docstring""" snake_case = list(struct.unpack('>16L' , A_ ) ) + [0] * 64 for i in range(16 , 80 ): snake_case = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 ) return w def snake_case ( self ): """simple docstring""" snake_case = self.padding() snake_case = self.split_blocks() for block in self.blocks: snake_case = self.expand_block(A_ ) snake_case = self.h for i in range(0 , 80 ): if 0 <= i < 20: snake_case = (b & c) | ((~b) & d) snake_case = 0x5_A_8_2_7_9_9_9 elif 20 <= i < 40: snake_case = b ^ c ^ d snake_case = 0x6_E_D_9_E_B_A_1 elif 40 <= i < 60: snake_case = (b & c) | (b & d) | (c & d) snake_case = 0x8_F_1_B_B_C_D_C elif 60 <= i < 80: snake_case = b ^ c ^ d snake_case = 0xC_A_6_2_C_1_D_6 snake_case = ( self.rotate(A_ , 5 ) + f + e + k + expanded_block[i] & 0xF_F_F_F_F_F_F_F, a, self.rotate(A_ , 30 ), c, d, ) snake_case = ( self.h[0] + a & 0xF_F_F_F_F_F_F_F, self.h[1] + b & 0xF_F_F_F_F_F_F_F, self.h[2] + c & 0xF_F_F_F_F_F_F_F, self.h[3] + d & 0xF_F_F_F_F_F_F_F, self.h[4] + e & 0xF_F_F_F_F_F_F_F, ) return ("{:08x}" * 5).format(*self.h ) def lowerCAmelCase__ ( ) -> Any: 
"""simple docstring""" snake_case = b"Test String" assert SHAaHash(_lowerCAmelCase ).final_hash() == hashlib.shaa(_lowerCAmelCase ).hexdigest() # noqa: S324 def lowerCAmelCase__ ( ) -> Any: """simple docstring""" snake_case = argparse.ArgumentParser(description='Process some strings or files' ) parser.add_argument( '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' ) snake_case = parser.parse_args() snake_case = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: snake_case = f.read() else: snake_case = bytes(_lowerCAmelCase , 'utf-8' ) print(SHAaHash(_lowerCAmelCase ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
150
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", 
"""FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys __lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
0
'''simple docstring''' lowercase__ : Optional[Any] = tuple[float, float, float] lowercase__ : int = tuple[float, float, float] def a__ ( lowercase : Dict, lowercase : Any ) -> Vectorad: """simple docstring""" _UpperCamelCase = end_pointa[0] - end_pointa[0] _UpperCamelCase = end_pointa[1] - end_pointa[1] _UpperCamelCase = end_pointa[2] - end_pointa[2] return (x, y, z) def a__ ( lowercase : Any, lowercase : Dict ) -> Vectorad: """simple docstring""" _UpperCamelCase = ab[1] * ac[2] - ab[2] * ac[1] # *i _UpperCamelCase = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j _UpperCamelCase = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def a__ ( lowercase : Union[str, Any], lowercase : Tuple ) -> bool: """simple docstring""" return tuple(round(_lowerCAmelCase, _lowerCAmelCase ) for x in vector ) == (0, 0, 0) def a__ ( lowercase : int, lowercase : Optional[Any], lowercase : str, lowercase : Optional[Any] = 10 ) -> bool: """simple docstring""" _UpperCamelCase = create_vector(_lowerCAmelCase, _lowerCAmelCase ) _UpperCamelCase = create_vector(_lowerCAmelCase, _lowerCAmelCase ) return is_zero_vector(get_ad_vectors_cross(_lowerCAmelCase, _lowerCAmelCase ), _lowerCAmelCase )
324
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCamelCase : str = logging.get_logger(__name__) __lowerCamelCase : str = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class A__ ( __snake_case , __snake_case ): _UpperCAmelCase :Optional[int] = 'convnextv2' def __init__( self , A_=3 , A_=4 , A_=4 , A_=None , A_=None , A_="gelu" , A_=0.02 , A_=1e-12 , A_=0.0 , A_=224 , A_=None , A_=None , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : Dict = num_channels UpperCamelCase : Union[str, Any] = patch_size UpperCamelCase : Union[str, Any] = num_stages UpperCamelCase : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes UpperCamelCase : List[str] = [3, 3, 9, 3] if depths is None else depths UpperCamelCase : Dict = hidden_act UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : Tuple = layer_norm_eps UpperCamelCase : str = drop_path_rate UpperCamelCase : List[str] = image_size UpperCamelCase : List[str] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] UpperCamelCase , UpperCamelCase : str = get_aligned_output_features_output_indices( out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
52
0
import torch from diffusers import EulerDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class SCREAMING_SNAKE_CASE__ ( __snake_case ): __SCREAMING_SNAKE_CASE = (EulerDiscreteScheduler,) __SCREAMING_SNAKE_CASE = 10 def UpperCamelCase ( self,**__lowerCamelCase ): A__ = { "num_train_timesteps": 1100, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**A_ ) return config def UpperCamelCase ( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=A_ ) def UpperCamelCase ( self ): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001],[0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=A_,beta_end=A_ ) def UpperCamelCase ( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=A_ ) def UpperCamelCase ( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=A_ ) def UpperCamelCase ( self ): A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps ) A__ = torch.manual_seed(0 ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma A__ = sample.to(A_ ) for i, t in enumerate(scheduler.timesteps ): A__ = scheduler.scale_model_input(A_,A_ ) A__ = model(A_,A_ ) A__ = scheduler.step(A_,A_,A_,generator=A_ ) A__ = output.prev_sample A__ = torch.sum(torch.abs(A_ ) ) A__ = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def UpperCamelCase ( self ): A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config(prediction_type='''v_prediction''' ) A__ = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps ) A__ = torch.manual_seed(0 ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma A__ = sample.to(A_ ) for i, t in 
enumerate(scheduler.timesteps ): A__ = scheduler.scale_model_input(A_,A_ ) A__ = model(A_,A_ ) A__ = scheduler.step(A_,A_,A_,generator=A_ ) A__ = output.prev_sample A__ = torch.sum(torch.abs(A_ ) ) A__ = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 0.0002 ) < 1E-2 assert abs(result_mean.item() - 2.26_76E-06 ) < 1E-3 def UpperCamelCase ( self ): A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**A_ ) scheduler.set_timesteps(self.num_inference_steps,device=A_ ) A__ = torch.manual_seed(0 ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() A__ = sample.to(A_ ) for t in scheduler.timesteps: A__ = scheduler.scale_model_input(A_,A_ ) A__ = model(A_,A_ ) A__ = scheduler.step(A_,A_,A_,generator=A_ ) A__ = output.prev_sample A__ = torch.sum(torch.abs(A_ ) ) A__ = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 10.0807 ) < 1E-2 assert abs(result_mean.item() - 0.0131 ) < 1E-3 def UpperCamelCase ( self ): A__ = self.scheduler_classes[0] A__ = self.get_scheduler_config() A__ = scheduler_class(**A_,use_karras_sigmas=A_ ) scheduler.set_timesteps(self.num_inference_steps,device=A_ ) A__ = torch.manual_seed(0 ) A__ = self.dummy_model() A__ = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu() A__ = sample.to(A_ ) for t in scheduler.timesteps: A__ = scheduler.scale_model_input(A_,A_ ) A__ = model(A_,A_ ) A__ = scheduler.step(A_,A_,A_,generator=A_ ) A__ = output.prev_sample A__ = torch.sum(torch.abs(A_ ) ) A__ = torch.mean(torch.abs(A_ ) ) assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2 assert abs(result_mean.item() - 0.16213932633399963 ) < 1E-3
193
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    """CONNECTION_TIMES_OUT mode: requests hang unless a timeout is given."""
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        # Without a timeout the simulated connection would hang forever.
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        # With a timeout the hang surfaces as a ConnectTimeout.
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    """CONNECTION_FAILS mode: every request raises ConnectionError."""
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    """HF_DATASETS_OFFLINE=1 mode: library helpers refuse network access."""
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
52
0
from sklearn.metrics import f1_score

import datasets


_DESCRIPTION = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.

        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric("f1")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""


_CITATION = """
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class F1(datasets.Metric):
    """F1 metric backed by ``sklearn.metrics.f1_score``."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Delegate to sklearn; return a scalar when a single score is produced."""
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
305
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Always-importable part of the module: configuration only.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

# The modeling code requires torch; register it only when torch is present.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]


if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy deps load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
52
0
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for KandinskyInpaintPipeline built from tiny dummy components."""

    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        """Tiny MultilingualCLIP text encoder with a fixed seed for reproducibility."""
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        """Assemble all pipeline components with deterministic weights."""
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline inputs (embeds, init image, mask, generator)."""
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask: everything masked except the top-left quadrant
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration test against the released Kandinsky 2.1 inpaint checkpoint."""

    def tearDown(self):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/cat.png"
        )
        # Mask out a band at the top of the image (where the hat will be painted).
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""


_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """1 if the two strings match after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Exact-match percentage: a prediction scores if it matches any of its references."""
    scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute (keep, delete-precision, add) F-scores for one n-gram order.

    sgrams/cgrams are the source/candidate n-grams; rgramslist holds the n-grams
    of each of the ``numref`` references.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(rgramcounter)
    addgramcountergood = set(addgramcounter) & set(sgramcounter)
    addgramcounterall = set(sgramcounter) - set(rgramcounter)
    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """Sentence-level SARI: average keep/delete/add scores over 1- to 4-grams."""
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)
    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase=True, tokenizer="13a", return_str=True):
    """Lowercase + tokenize a sentence so that splitting on spaces is meaningful.

    Normalization is required for the ASSET dataset (one of the primary
    datasets in sentence simplification) to allow using space
    to split the sentence. Even though Wiki-Auto and TURK datasets,
    do not require normalization, we do it for consistency.
    Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100), averaged over sentence-level scores."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """BLEU via sacrebleu; all predictions must have the same number of references."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    # Transpose: sacrebleu wants one list per reference position.
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """Combined SARI + sacrebleu + exact-match metric for text simplification."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
52
0
"""simple docstring""" import unittest import numpy as np import torch from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device enable_full_determinism() class a ( unittest.TestCase ): @property def lowerCAmelCase_ ( self : Tuple ): torch.manual_seed(0 ) _UpperCAmelCase = UNetaDModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , ) return model def lowerCAmelCase_ ( self : Optional[Any] ): _UpperCAmelCase = self.dummy_uncond_unet _UpperCAmelCase = KarrasVeScheduler() _UpperCAmelCase = KarrasVePipeline(unet=A_ , scheduler=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(num_inference_steps=2 , generator=A_ , output_type="""numpy""" ).images _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(num_inference_steps=2 , generator=A_ , output_type="""numpy""" , return_dict=A_ )[0] _UpperCAmelCase = image[0, -3:, -3:, -1] _UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCAmelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch class a ( unittest.TestCase ): def lowerCAmelCase_ ( self : Any ): _UpperCAmelCase = "google/ncsnpp-celebahq-256" _UpperCAmelCase = UNetaDModel.from_pretrained(A_ ) _UpperCAmelCase = KarrasVeScheduler() _UpperCAmelCase = KarrasVePipeline(unet=A_ , scheduler=A_ ) pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) _UpperCAmelCase = torch.manual_seed(0 ) _UpperCAmelCase = pipe(num_inference_steps=20 , generator=A_ , output_type="""numpy""" ).images 
_UpperCAmelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 256, 256, 3) _UpperCAmelCase = np.array([0.578, 0.5_811, 0.5_924, 0.5_809, 0.587, 0.5_886, 0.5_861, 0.5_802, 0.586] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
289
"""RoBERTa model configuration and ONNX export configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration for a RoBERTa model.

    Defaults reproduce the architecture of ``roberta-base``.  All arguments
    are stored as attributes of the same name; extra keyword arguments are
    forwarded to :class:`PretrainedConfig`.
    """

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic axes of the exported graph's inputs.

        Multiple-choice tasks carry an extra ``choice`` axis between the
        batch and sequence dimensions.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
52
0
"""Tests for the Deformable DETR image processor."""
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs

if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    """Helper that holds processor kwargs and computes expected output sizes."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # By default use the same resize logic as the real processor config.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1_333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected (height, width) after aspect-preserving `shortest_edge` resize.

        For a batch, returns the per-axis maxima (the padded batch size).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1_333})
        self.assertEqual(image_processor.do_pad, True)

        # Overriding kwargs at load time must win over the serialized dict.
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39_769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.27_96, 0.31_38, 0.34_81])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39_769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1_066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.27_96, 0.31_38, 0.34_81])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39_769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822_873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1_066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
251
"""Unconditional audio-generation diffusion pipeline."""
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class A__(DiffusionPipeline):
    """Pipeline for unconditional audio generation from a 1-D UNet + scheduler."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        """Generate `batch_size` audio clips.

        Args:
            batch_size: number of clips to generate.
            num_inference_steps: denoising steps (more = higher quality, slower).
            generator: optional RNG(s); a list must match `batch_size`.
            audio_length_in_s: clip length in seconds; defaults to the UNet's
                native sample size.
            return_dict: return an `AudioPipelineOutput` instead of a tuple.

        Raises:
            ValueError: if the requested length is too short for the UNet's
                downsampling factor, or the generator list length mismatches.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet halves the temporal resolution once per up/down block.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple so the UNet can process it; the
            # generated audio is trimmed back to the requested length below.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)
        return AudioPipelineOutput(audios=audio)
52
0
"""Tests for the text-classification pipeline."""
import unittest

from transformers import (
    MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
    TextClassificationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow

from .test_pipelines_common import ANY

# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    # Drop the model types whose inputs are incompatible with plain text.
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
265
import functools


def A_(worda: str, wordb: str) -> int:
    """Return the Levenshtein (edit) distance between ``worda`` and ``wordb``.

    Allowed operations are single-character insertion, deletion and
    substitution, each with cost 1.  Implemented as top-down dynamic
    programming memoised with :func:`functools.cache`.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # if first word index is overflow - insert all remaining of the second word
        if indexa >= len_worda:
            return len_wordb - indexb
        # if second word index is overflow - delete all remaining of the first word
        if indexb >= len_wordb:
            return len_worda - indexa
        # 0 if current letters match, otherwise 1 for a substitution
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),  # delete from worda
            1 + min_distance(indexa, indexb + 1),  # insert into worda
            diff + min_distance(indexa + 1, indexb + 1),  # match / substitute
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
52
0
"""Edit (Levenshtein) distance via memoised recursion."""
import functools


def lowercase(word_a: str, word_b: str) -> int:
    """Return the minimum number of single-character edits (insertions,
    deletions, substitutions) that turn ``word_a`` into ``word_b``.
    """
    len_a, len_b = len(word_a), len(word_b)

    @functools.cache
    def min_distance(i: int, j: int) -> int:
        if i >= len_a:
            # word_a exhausted: insert the rest of word_b
            return len_b - j
        if j >= len_b:
            # word_b exhausted: delete the rest of word_a
            return len_a - i
        substitution_cost = 0 if word_a[i] == word_b[j] else 1
        return min(
            1 + min_distance(i + 1, j),  # deletion
            1 + min_distance(i, j + 1),  # insertion
            substitution_cost + min_distance(i + 1, j + 1),  # substitution / match
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
256
"""Tests for the Audio Spectrogram Transformer (AST) feature extractor."""
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


global_rng = random.Random()

if is_torch_available():
    import torch


def A_(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats in [0, scale) with the given 2-D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


# The tests below call this helper by its descriptive name.
floats_list = A_


class ASTFeatureExtractionTester(unittest.TestCase):
    """Holds feature-extractor kwargs and builds common test inputs."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=1_6000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between consecutive input lengths so the batch spans min..max.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # float64 input must be cast down to float32 by the extractor.
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76,
             -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03,
             -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
52
0
"""simple docstring""" from __future__ import annotations from functools import lru_cache from math import ceil _snake_case : str = 100 _snake_case : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) _snake_case : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A__ ( UpperCamelCase ): if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} A = set() A = 42 A = 42 for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A__ ( UpperCamelCase = 5_000 ): for number_to_partition in range(1 , _lowerCAmelCase ): if len(partition(_lowerCAmelCase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(F"""{solution() = }""")
292
import pickle import numpy as np from matplotlib import pyplot as plt class A__ : def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ): '''simple docstring''' UpperCamelCase : int = bp_numa UpperCamelCase : int = bp_numa UpperCamelCase : List[Any] = bp_numa UpperCamelCase : Optional[int] = conva_get[:2] UpperCamelCase : Optional[Any] = conva_get[2] UpperCamelCase : Dict = size_pa UpperCamelCase : Union[str, Any] = rate_w UpperCamelCase : Dict = rate_t UpperCamelCase : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1 UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1 UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1 def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(A_ , "wb" ) as f: pickle.dump(A_ , A_ ) print(F"""Model saved: {save_path}""" ) @classmethod def __UpperCamelCase( cls , A_ ): '''simple docstring''' with open(A_ , "rb" ) as f: UpperCamelCase : Optional[Any] = pickle.load(A_ ) # noqa: S301 UpperCamelCase : List[Any] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" ) UpperCamelCase : List[Any] = model_dic.get("num_bp1" ) UpperCamelCase : Dict = model_dic.get("num_bp2" ) UpperCamelCase : Dict = 
model_dic.get("num_bp3" ) UpperCamelCase : Dict = model_dic.get("rate_weight" ) UpperCamelCase : str = model_dic.get("rate_thre" ) # create model instance UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # modify model parameter UpperCamelCase : str = model_dic.get("w_conv1" ) UpperCamelCase : Optional[Any] = model_dic.get("wkj" ) UpperCamelCase : int = model_dic.get("vji" ) UpperCamelCase : Any = model_dic.get("thre_conv1" ) UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" ) UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" ) return conv_ins def __UpperCamelCase( self , A_ ): '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def __UpperCamelCase( self , A_ ): '''simple docstring''' return round(A_ , 3 ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = convs[0] UpperCamelCase : Optional[Any] = convs[1] UpperCamelCase : Optional[Any] = np.shape(A_ )[0] # get the data slice of original image data, data_focus UpperCamelCase : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , A_ ): for j_focus in range(0 , size_data - size_conv + 1 , A_ ): UpperCamelCase : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): UpperCamelCase : str = [] for i_focus in range(len(A_ ) ): UpperCamelCase : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape( A_ , A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion UpperCamelCase : List[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) UpperCamelCase : Tuple = np.asarray(A_ ) return 
focus_list, data_featuremap def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ): '''simple docstring''' UpperCamelCase : Any = len(featuremaps[0] ) UpperCamelCase : str = int(size_map / size_pooling ) UpperCamelCase : Optional[int] = [] for i_map in range(len(A_ ) ): UpperCamelCase : Tuple = featuremaps[i_map] UpperCamelCase : Any = [] for i_focus in range(0 , A_ , A_ ): for j_focus in range(0 , A_ , A_ ): UpperCamelCase : int = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = [] for i in range(len(A_ ) ): UpperCamelCase : List[Any] = np.shape(data[i] ) UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] ) UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) UpperCamelCase : Any = np.asarray(A_ ) return data_expanded def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = np.asarray(A_ ) UpperCamelCase : List[Any] = np.shape(A_ ) UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = [] UpperCamelCase : Optional[int] = 0 for i_map in range(A_ ): UpperCamelCase : int = np.ones((size_map, size_map) ) for i in range(0 , A_ , A_ ): for j in range(0 , A_ , A_ ): UpperCamelCase : str = pd_pool[ i_pool ] UpperCamelCase : str = i_pool + 1 UpperCamelCase : str = np.multiply( A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ): '''simple 
docstring''' print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A_ )) ) print((" - - Shape: Teach_Data ", np.shape(A_ )) ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = [] UpperCamelCase : int = 1_0000 while rp < n_repeat and mse >= error_accuracy: UpperCamelCase : Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCamelCase : Any = np.asmatrix(datas_train[p] ) UpperCamelCase : List[str] = np.asarray(datas_teach[p] ) UpperCamelCase , UpperCamelCase : Dict = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : int = np.shape(A_ ) UpperCamelCase : List[str] = self._expand(A_ ) UpperCamelCase : Optional[int] = data_bp_input UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - self.thre_bpa UpperCamelCase : Dict = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCamelCase : List[Any] = np.multiply( (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : str = np.multiply( np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : Any = np.dot(A_ , self.vji ) UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist() UpperCamelCase : List[Any] = self._calculate_gradient_from_pool( A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] ) UpperCamelCase : List[Any] = 
self.rate_weight * np.dot(A_ , A_ ) UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCamelCase : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCamelCase : Any = rp + 1 UpperCamelCase : Union[str, Any] = error_count / patterns all_mse.append(A_ ) def draw_error(): UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ , "+-" ) plt.plot(A_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A_ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A_ )) ) for p in range(len(A_ ) ): UpperCamelCase : int = np.asmatrix(datas_test[p] ) UpperCamelCase , UpperCamelCase : Any = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : Dict = self._expand(A_ ) UpperCamelCase : List[Any] = data_bp_input UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa UpperCamelCase : List[Any] = self.sig(A_ ) UpperCamelCase : int = bp_outa * 
self.wkj.T - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out] return np.asarray(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = np.asmatrix(A_ ) UpperCamelCase , UpperCamelCase : List[Any] = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : str = self.pooling(A_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
52
0
'''simple docstring''' import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class A__ : """simple docstring""" @staticmethod def _lowerCAmelCase ( *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : int ) -> Tuple: """simple docstring""" pass def __UpperCAmelCase ( a_: List[str] ): _UpperCAmelCase : Union[str, Any] = hashlib.mda(image.tobytes() ) return m.hexdigest()[:10] def __UpperCAmelCase ( a_: List[Any] ): _UpperCAmelCase : Dict = np.array(_lowerCAmelCase ) _UpperCAmelCase : Optional[int] = npimg.shape return {"hash": hashimage(_lowerCAmelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class A__ ( unittest.TestCase ): """simple docstring""" UpperCamelCase_ : Tuple = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) UpperCamelCase_ : List[str] = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple ) -> str: """simple docstring""" _UpperCAmelCase : List[Any] = MaskGenerationPipeline(model=A_ , image_processor=A_ ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def _lowerCAmelCase ( self : str , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ) -> Union[str, Any]: """simple docstring""" pass @require_tf @unittest.skip("Image segmentation not implemented in TF" ) def _lowerCAmelCase ( self : Any ) -> int: """simple 
docstring""" pass @slow @require_torch def _lowerCAmelCase ( self : Optional[int] ) -> List[str]: """simple docstring""" _UpperCAmelCase : List[Any] = pipeline("mask-generation" , model="facebook/sam-vit-huge" ) _UpperCAmelCase : Optional[Any] = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg" , points_per_batch=2_5_6 ) # Shortening by hashing _UpperCAmelCase : Optional[Any] = [] for i, o in enumerate(outputs["masks"] ): new_outupt += [{"mask": mask_to_test_readable(A_ ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444}, {"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.021}, {"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167}, {"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132}, {"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053}, {"mask": {"hash": "e2d0b7a0b7", "shape": (4_8_0, 6_4_0)}, "scores": 0.9967}, {"mask": {"hash": "453c7844bd", "shape": (4_8_0, 6_4_0)}, "scores": 0.993}, {"mask": {"hash": "3d44f2926d", "shape": (4_8_0, 6_4_0)}, "scores": 0.9909}, {"mask": {"hash": "64033ddc3f", "shape": (4_8_0, 6_4_0)}, "scores": 0.9879}, {"mask": {"hash": "801064ff79", "shape": (4_8_0, 6_4_0)}, "scores": 0.9834}, {"mask": {"hash": "6172f276ef", "shape": (4_8_0, 6_4_0)}, "scores": 0.9716}, {"mask": {"hash": "b49e60e084", "shape": (4_8_0, 6_4_0)}, "scores": 0.9612}, {"mask": {"hash": "a811e775fd", "shape": (4_8_0, 6_4_0)}, "scores": 0.9599}, {"mask": {"hash": "a6a8ebcf4b", "shape": (4_8_0, 6_4_0)}, "scores": 0.9552}, {"mask": {"hash": "9d8257e080", "shape": (4_8_0, 6_4_0)}, "scores": 0.9532}, {"mask": {"hash": "32de6454a8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9516}, {"mask": {"hash": "af3d4af2c8", "shape": (4_8_0, 6_4_0)}, "scores": 0.9499}, {"mask": {"hash": "3c6db475fb", "shape": (4_8_0, 6_4_0)}, "scores": 0.9483}, {"mask": 
{"hash": "c290813fb9", "shape": (4_8_0, 6_4_0)}, "scores": 0.9464}, {"mask": {"hash": "b6f0b8f606", "shape": (4_8_0, 6_4_0)}, "scores": 0.943}, {"mask": {"hash": "92ce16bfdf", "shape": (4_8_0, 6_4_0)}, "scores": 0.943}, {"mask": {"hash": "c749b25868", "shape": (4_8_0, 6_4_0)}, "scores": 0.9408}, {"mask": {"hash": "efb6cab859", "shape": (4_8_0, 6_4_0)}, "scores": 0.9335}, {"mask": {"hash": "1ff2eafb30", "shape": (4_8_0, 6_4_0)}, "scores": 0.9326}, {"mask": {"hash": "788b798e24", "shape": (4_8_0, 6_4_0)}, "scores": 0.9262}, {"mask": {"hash": "abea804f0e", "shape": (4_8_0, 6_4_0)}, "scores": 0.8999}, {"mask": {"hash": "7b9e8ddb73", "shape": (4_8_0, 6_4_0)}, "scores": 0.8986}, {"mask": {"hash": "cd24047c8a", "shape": (4_8_0, 6_4_0)}, "scores": 0.8984}, {"mask": {"hash": "6943e6bcbd", "shape": (4_8_0, 6_4_0)}, "scores": 0.8873}, {"mask": {"hash": "b5f47c9191", "shape": (4_8_0, 6_4_0)}, "scores": 0.8871} ] , ) # fmt: on @require_torch @slow def _lowerCAmelCase ( self : Optional[Any] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Optional[Any] = "facebook/sam-vit-huge" _UpperCAmelCase : Optional[Any] = pipeline("mask-generation" , model=A_ ) _UpperCAmelCase : Optional[Any] = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg" , pred_iou_thresh=1 , points_per_batch=2_5_6 ) # Shortening by hashing _UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["masks"] ): new_outupt += [{"mask": mask_to_test_readable(A_ ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(A_ , decimals=4 ) , [ {"mask": {"hash": "115ad19f5f", "shape": (4_8_0, 6_4_0)}, "scores": 1.0444}, {"mask": {"hash": "6affa964c6", "shape": (4_8_0, 6_4_0)}, "scores": 1.0210}, {"mask": {"hash": "dfe28a0388", "shape": (4_8_0, 6_4_0)}, "scores": 1.0167}, {"mask": {"hash": "c0a5f4a318", "shape": (4_8_0, 6_4_0)}, "scores": 1.0132}, {"mask": {"hash": "fe8065c197", "shape": (4_8_0, 6_4_0)}, "scores": 1.0053}, ] , )
145
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging __lowerCamelCase : Union[str, Any] = logging.get_logger(__name__) __lowerCamelCase : Any = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class A__ ( __snake_case ): _UpperCAmelCase :Dict = 'bart' _UpperCAmelCase :str = ['past_key_values'] _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ): '''simple docstring''' UpperCamelCase : int = vocab_size UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : Any = d_model UpperCamelCase : Optional[Any] = encoder_ffn_dim UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = encoder_attention_heads UpperCamelCase : Optional[int] = decoder_ffn_dim UpperCamelCase : List[str] = decoder_layers UpperCamelCase : Optional[int] = decoder_attention_heads UpperCamelCase : int = dropout UpperCamelCase : int = attention_dropout UpperCamelCase : Tuple = activation_dropout UpperCamelCase : Tuple = activation_function UpperCamelCase : int = init_std UpperCamelCase : List[Any] = encoder_layerdrop UpperCamelCase : List[str] = decoder_layerdrop UpperCamelCase : Dict = classifier_dropout UpperCamelCase : Optional[int] = use_cache UpperCamelCase : List[Any] = encoder_layers UpperCamelCase : int = scale_embedding # 
scale factor will be sqrt(d_model) if True super().__init__( num_labels=A_ , pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , is_encoder_decoder=A_ , decoder_start_token_id=A_ , forced_eos_token_id=A_ , **A_ , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ): UpperCamelCase : int = self.bos_token_id warnings.warn( F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """ "The config can simply be saved and uploaded again to be fixed." ) class A__ ( __snake_case ): @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase : List[str] = {0: "batch"} UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"} else: UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(A_ , direction="inputs" ) elif self.task == "causal-lm": # TODO: figure this case out. 
UpperCamelCase : Any = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers for i in range(A_ ): UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"} else: UpperCamelCase : Optional[Any] = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def __UpperCamelCase( self ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Tuple = super().outputs else: UpperCamelCase : Dict = super(A_ , self ).outputs if self.use_past: UpperCamelCase , UpperCamelCase : int = self.num_layers for i in range(A_ ): UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"} UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) # Generate decoder inputs UpperCamelCase : List[Any] = seq_length if not self.use_past else 1 UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} UpperCamelCase : List[Any] = dict(**A_ , **A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1] UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads UpperCamelCase : int = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : List[Any] = decoder_seq_length + 3 UpperCamelCase : str = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) UpperCamelCase : int = torch.cat( [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 ) UpperCamelCase : int = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers UpperCamelCase : Any = min(A_ , A_ ) UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(A_ ): common_inputs["past_key_values"].append( ( torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), torch.zeros(A_ ), ) ) # TODO: test this. UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(A_ , A_ ): common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , A_ , A_ , A_ , A_ ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." 
) else: import torch UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape # Not using the same length for past_key_values UpperCamelCase : Optional[Any] = seqlen + 2 UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads UpperCamelCase : str = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype UpperCamelCase : int = torch.cat( [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 ) UpperCamelCase : Optional[Any] = [ (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ ) ] return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' UpperCamelCase : Optional[Any] = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ ) UpperCamelCase : int = compute_effective_axis_dimension( A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ ) # Generate dummy inputs according to compute batch and sequence UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) ) return common_inputs def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) elif self.task == "causal-lm": UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm( A_ , batch_size=A_ 
, seq_length=A_ , is_pair=A_ , framework=A_ ) else: UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ ) return common_inputs def __UpperCamelCase( self , A_ , A_ , A_ , A_ ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ ) else: UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_( A_ , A_ , A_ , A_ )
52
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) class lowerCAmelCase_ ( __snake_case ): """simple docstring""" def __init__( self , *lowerCAmelCase , **lowerCAmelCase ): """simple docstring""" warnings.warn( 'The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please' ' use GLPNImageProcessor instead.' , A_ , ) super().__init__(*A_ , **A_ )
150
"""Small number-theory helpers: primality, sieving, factorization, gcd/lcm,
perfect numbers, fraction simplification, factorial and Fibonacci.

NOTE(review): all functions in the original were mangled to the same name
(`A_`), which clobbered each other and broke the internal calls to
``is_prime``/``prime_factorization``/``gcd``/...; the names below are the ones
the bodies themselves reference (conventional primelib names are used for the
functions nothing here calls).
"""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (trial division up to sqrt)."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break

    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list:
    """Sieve of Eratosthenes: primes from 2 up to and including ``n``."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    # beginList: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))

    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0

    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list:
    """Primes from 2 up to ``n``, found by direct primality testing."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = []

    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list:
    """Prime factorization of ``number`` as a list with multiplicity."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []  # this list will be returns of the function.

    # potential prime number factors.
    factor = 2
    quotient = number

    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)

    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)

    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    """Return two primes whose sum is the even ``number`` > 2 (Goldbach)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []  # this list will returned

    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    # run variable for while-loops.
    i = 0
    j = None

    # exit variable. for break up the loops
    loop = True

    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    # precondition
    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two positive integers (German: kgV)."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    count1 = 0
    count2 = 0

    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'

    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    # precondition
    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime, zero-indexed: ``get_prime(0) == 2``."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1

    # precondition
    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    """Primes strictly between the two prime arguments."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number

    ans = []  # this list will be returns.

    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1

    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # precondition
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"

    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int) -> list:
    """All positive divisors of ``n`` (including 1 and ``n``)."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = []  # will be returned.

    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)

    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    """Reduce ``numerator/denominator`` by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Iterative factorial of ``n``."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1  # this will be return.

    for factor in range(1, n + 1):
        ans *= factor

    return ans


def fib(n: int) -> int:
    """Fibonacci-style sequence with fib(0) == fib(1) == 1."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return

    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp

    return ans
52
0
"""Tests for the CANINE tokenizer (character-level: ids are Unicode code points)."""
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class __lowerCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    # NOTE(review): the base class is restored to the imported TokenizerTesterMixin and
    # the two attributes to the names that mixin reads; the originals were mangled to an
    # undefined `__snake_case` base and two clashing `_snake_case` attributes.
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        """Save a fresh CANINE tokenizer so tests can reload it from tmpdirname."""
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        # Pretrained checkpoint; fetched once thanks to cached_property.
        return CanineTokenizer.from_pretrained('google/canine-s')

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        # NOTE(review): the original assigned 1024 to a mangled target; model_max_length
        # is the attribute the save/load test below inspects -- confirm against upstream.
        tokenizer.model_max_length = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        """Batch encoding yields the expected code-point ids, shapes and type."""
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertIn('token_type_ids', batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors='pt'
        )
        self.assertEqual(32, targets['input_ids'].shape[1])

    def test_save_and_load_tokenizer(self):
        """Round-trip save_pretrained/from_pretrained preserves encodings and config."""
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        """A newly added special token encodes to one id and is skippable on decode."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                special_token_id = 0xE005
                special_token = chr(special_token_id)

                tokenizer.add_special_tokens({'cls_token': special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        """Special tokens added via either API tokenize as a single token."""
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        """An AddedToken survives save_pretrained/from_pretrained."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                # a special token for Canine can be defined as follows:
                new_token_id = 0xE006
                new_token = chr(new_token_id)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({'additional_special_tokens': [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        """Additional special tokens written to the saved JSON configs are honoured."""
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                new_token_id = 0xE006
                new_token_1 = chr(new_token_id)

                special_tokens_map['additional_special_tokens'] = [new_token_1]
                tokenizer_config['additional_special_tokens'] = [new_token_1]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                new_token_id = 0xE007
                new_token_2 = chr(new_token_id)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    def test_tokenizers_common_ids_setters(self):
        """Setting `<attr>_id` updates both the id and the derived token string."""
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"""{tokenizer.__class__.__name__}"""):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), None)

                    setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)

                setattr(tokenizer, 'additional_special_tokens_ids', [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, 'additional_special_tokens_ids', [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [additional_special_token_id])

    # The eight common-suite hooks below are overridden as no-ops because CANINE is
    # character-based and has no vocabulary file, so the vocabulary-centric checks
    # do not apply.
    # NOTE(review): the stub names were mangled to identical placeholders in the
    # original; the names below are reconstructed from the common tokenizer test
    # suite -- confirm against upstream.
    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        pass

    def test_tokenizer_slow_store_full_signature(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_pretrained_model_lists(self):
        pass
324
"""Repo check: every (non-deprecated) config class docstring must reference a
valid checkpoint, e.g. `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`.

NOTE(review): the module constants and both functions were mangled to identical
placeholder names; the names restored below are exactly the ones the code
itself references (PATH_TO_TRANSFORMERS, CONFIG_MAPPING, _re_checkpoint, ...).
"""
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")

# Config classes exempt from the checkpoint-in-docstring requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    """DecisionTransformerConfig""",
    """EncoderDecoderConfig""",
    """MusicgenConfig""",
    """RagConfig""",
    """SpeechEncoderDecoderConfig""",
    """TimmBackboneConfig""",
    """VisionEncoderDecoderConfig""",
    """VisionTextDualEncoderConfig""",
    """LlamaConfig""",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose markdown link in the class source
    points at the matching Hub URL, or None if no valid link is found."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"""https://huggingface.co/{ckpt_name}"""
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every config class with no valid checkpoint link."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
52
0
"""Validate that a binary tree satisfies the binary-search-tree invariant."""
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class TreeNode:
    """Binary-tree node: a float-like payload plus optional children.

    NOTE(review): the original dataclass had three identically-mangled field
    names; `data`/`left`/`right` are restored from the attribute accesses below.
    """

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree_root: TreeNode | None) -> bool:
    """Return True iff the tree rooted at ``tree_root`` is a valid BST.

    Raises:
        ValueError: if any node is not a TreeNode or its data cannot be
            interpreted as a float.
    """

    # Validation
    def is_valid_tree(node: object) -> bool:
        # Structural check: every reachable node must be a TreeNode whose data
        # converts cleanly to float.
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree_root):
        raise ValueError(
            '''Each node should be type of TreeNode and data should be float.'''
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        # Each node must lie strictly inside the (left_bound, right_bound)
        # window inherited from its ancestors.
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree_root, -float('''inf'''), float('''inf'''))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
193
from __future__ import annotations from functools import lru_cache from math import ceil __lowerCamelCase : str = 100 __lowerCamelCase : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) __lowerCamelCase : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A_ ( _lowerCAmelCase ) -> set[int]: if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} UpperCamelCase : set[int] = set() UpperCamelCase : int UpperCamelCase : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A_ ( _lowerCAmelCase = 5000 ) -> int | None: for number_to_partition in range(1 , _lowerCAmelCase ): if len(partition(_lowerCAmelCase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f"""{solution() = }""")
52
0
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin

enable_full_determinism()


@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for StableDiffusionPanoramaPipeline using tiny dummy components."""

    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        # Tiny randomly-initialized components so the pipeline runs in seconds on CPU.
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="""scaled_linear"""
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="""scaled_linear""", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionPanoramaNightlyTests(unittest.TestCase):
    """Slow GPU tests against the real stable-diffusion-2-base checkpoint."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="""scheduler""")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            """stabilityai/stable-diffusion-2-base""", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 2048, 3)
        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="""scheduler""")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="""scheduler""")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
305
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer, without any prefix.

    Expects a non-negative value; signs are handled by :func:`main`.
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    # Digits of the quotient first, then the remainder (least significant last).
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Convert an integer string (optionally signed) to a '0b'-prefixed binary string.

    Raises:
        ValueError: if the input is empty or not an integer.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"""{negative}0b{binary_recursive(int(number))}"""


if __name__ == "__main__":
    from doctest import testmod

    testmod()
52
0
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2
import numpy as np

# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = """"""
IMG_DIR = """"""
OUTPUT_DIR = """"""
NUMBER_IMAGES = 250


def main() -> None:
    """Build NUMBER_IMAGES mosaic images (4 sources each) and their YOLO label files."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('''.''', 1)[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cv2.imwrite(f"""{file_root}.jpg""", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""")
        annos_list = []
        for anno in new_annos:
            # Convert corner boxes back to YOLO center/size format.
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(obj)
        with open(f"""{file_root}.txt""", '''w''') as outfile:
            outfile.write('''\n'''.join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Read YOLO-format label files and pair them with their .jpg image paths.

    Returns (img_paths, labels) where each label is a list of
    [class_id, xmin, ymin, xmax, ymax] in relative coordinates.
    """
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '''*.txt''')):
        label_name = label_file.split(os.sep)[-1].rsplit('''.''', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''').split(''' ''')
            # YOLO stores center/size; convert to corner coordinates.
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[np.ndarray, list, str]:
    """Compose four images into one mosaic and rescale their annotations.

    Returns (mosaic image, updated annotations, path of the first source image).
    Boxes smaller than *filter_scale* in either dimension are dropped.
    """
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    # Pixel coordinates of the random split point between the four tiles.
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]


def random_chars(number_char: int) -> str:
    """Return a random lowercase-alphanumeric string of length *number_char*."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print('DONE ✅')
207
import unittest

from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        LiltForQuestionAnswering,
        LiltForSequenceClassification,
        LiltForTokenClassification,
        LiltModel,
    )
    from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST


class LiltModelTester:
    """Builds tiny LiLT configs/inputs and runs shape checks for each head."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the forward pass with progressively fewer optional inputs.
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self,
        config,
        input_ids,
        bbox,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertTrue(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
52
0
"""simple docstring""" import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class a ( __snake_case ): # to overwrite at feature extractactor specific tests _snake_case : Any = None _snake_case : Optional[Any] = None @property def lowerCAmelCase_ ( self : Union[str, Any] ): return self.feat_extract_tester.prepare_feat_extract_dict() def lowerCAmelCase_ ( self : List[Any] ): _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(A_ , """feature_size""" ) ) self.assertTrue(hasattr(A_ , """sampling_rate""" ) ) self.assertTrue(hasattr(A_ , """padding_value""" ) ) def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common() _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) _UpperCAmelCase = feat_extract.model_input_names[0] _UpperCAmelCase = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(A_ ) == len(A_ ) for x, y in zip(A_ , processed_features[input_name] ) ) ) _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ ) _UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" ) _UpperCAmelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: _UpperCAmelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def lowerCAmelCase_ ( self : Dict ): _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ ) _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) _UpperCAmelCase = feat_extract.model_input_names[0] _UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" ) _UpperCAmelCase = 
processed_features[input_name] if len(batch_features_input.shape ) < 3: _UpperCAmelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def lowerCAmelCase_ ( self : Optional[int] ): _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=A_ ) _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) _UpperCAmelCase = feat_extract.model_input_names[0] _UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" ) _UpperCAmelCase = processed_features[input_name] if len(batch_features_input.shape ) < 3: _UpperCAmelCase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def lowerCAmelCase_ ( self : Dict , __lowerCAmelCase : List[str]=False ): def _inputs_have_equal_length(__lowerCAmelCase : int ): _UpperCAmelCase = len(input[0] ) for input_slice in input[1:]: if len(A_ ) != length: return False return True def _inputs_are_equal(__lowerCAmelCase : List[str] , __lowerCAmelCase : Optional[int] ): if len(A_ ) != len(A_ ): return False for input_slice_a, input_slice_a in zip(A_ , A_ ): if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ): return False return True _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict ) _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ ) _UpperCAmelCase = feat_extract.model_input_names[0] _UpperCAmelCase = BatchFeature({input_name: speech_inputs} ) _UpperCAmelCase = self.feat_extract_tester.seq_length_diff _UpperCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff _UpperCAmelCase = self.feat_extract_tester.min_seq_length _UpperCAmelCase = self.feat_extract_tester.batch_size _UpperCAmelCase = self.feat_extract_tester.feature_size # 
        # ------------------------------------------------------------------
        # NOTE(review): this region is the interior of a feature-extraction
        # test mixin whose class header lies before this chunk; the first
        # statements below continue a padding-test method that started
        # earlier, so they are shown at method-body depth.  Obfuscation
        # damage to be aware of (TODO restore):
        #   * every method is named `lowerCAmelCase_`, so in a live class all
        #     but the last definition shadow each other;
        #   * every local is assigned to `_UpperCAmelCase`, while later code
        #     reads names (`input_a`, `speech_inputs`, `feat_extract`,
        #     `pad_min_length`, ...) that are never bound here;
        #   * most call arguments were replaced by the undefined name `A_`.
        # Code is kept token-for-token; only formatting and comments changed.
        # ------------------------------------------------------------------
        # test padding for List[int] + numpy
        _UpperCAmelCase = feat_extract.pad(A_ , padding=A_ )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""longest""" )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""longest""" , return_tensors="""np""" )
        _UpperCAmelCase = input_a[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , padding="""max_length""" )[input_name]
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=A_ , return_tensors="""np""" )
        _UpperCAmelCase = input_a[input_name]
        self.assertFalse(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_are_equal(A_ , A_ ) )
        self.assertTrue(len(input_a[0] ) == pad_min_length )
        self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
        self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
        self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        _UpperCAmelCase = feat_extract.pad(A_ , pad_to_multiple_of=10 )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""longest""" , pad_to_multiple_of=10 )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , pad_to_multiple_of=10 , max_length=A_ )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , pad_to_multiple_of=10 , max_length=A_ , return_tensors="""np""" , )
        _UpperCAmelCase = input_a[input_name]
        self.assertTrue(all(len(A_ ) % 10 == 0 for x in input_a ) )
        self.assertTrue(_inputs_are_equal(A_ , A_ ) )
        # round the longest length up to the next multiple of 10
        _UpperCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(A_ ) == expected_mult_pad_length for x in input_a ) )
        self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == feature_size )
        # Check padding value is correct
        _UpperCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
            < 1e-3 )

    def lowerCAmelCase_ ( self : Union[str, Any] , __lowerCAmelCase : Tuple=False ):
        # Shared truncation checks; presumably originally `_check_truncation(numpify=False)`
        # given the callers below — TODO confirm against the un-obfuscated upstream file.
        def _inputs_have_equal_length(__lowerCAmelCase : Dict ):
            # True iff every slice has the same length as the first one.
            # NOTE(review): reads builtin `input` and unbound `length` — the
            # original locals were lost in obfuscation.
            _UpperCAmelCase = len(input[0] )
            for input_slice in input[1:]:
                if len(A_ ) != length:
                    return False
            return True

        def _inputs_are_equal(__lowerCAmelCase : Tuple , __lowerCAmelCase : int ):
            # Element-wise comparison of two batches with a small float tolerance.
            if len(A_ ) != len(A_ ):
                return False
            for input_slice_a, input_slice_a in zip(A_ , A_ ):
                if not np.allclose(np.asarray(A_ ) , np.asarray(A_ ) , atol=1e-3 ):
                    return False
            return True

        _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=A_ )
        _UpperCAmelCase = feat_extract.model_input_names[0]
        _UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        # truncate to smallest
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=A_ )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
        _UpperCAmelCase = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertFalse(_inputs_have_equal_length(A_ ) )
        # truncate to smallest with np
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=A_ , )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
        _UpperCAmelCase = input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(A_ ) )
        # truncate to middle
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=A_ , return_tensors="""np""" , )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=A_ )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
        _UpperCAmelCase = input_a[input_name]
        self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertTrue(_inputs_are_equal(A_ , A_ ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(A_ ) )
        self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , truncation=A_ )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , padding="""longest""" , truncation=A_ )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , padding="""longest""" , truncation=A_ )[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(A_ ):
            feat_extract.pad(A_ , padding="""max_length""" , truncation=A_ )[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        _UpperCAmelCase = 12
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , truncation=A_ , )
        _UpperCAmelCase = input_a[input_name]
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=A_ , )
        _UpperCAmelCase = input_a[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        _UpperCAmelCase = len(speech_inputs[0] )
        if expected_length % pad_to_multiple_of != 0:
            _UpperCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0] ) == expected_length )
        self.assertTrue(_inputs_have_equal_length(A_ ) )
        self.assertFalse(_inputs_have_equal_length(A_ ) )

    def lowerCAmelCase_ ( self : List[Any] ):
        # List-input padding test (presumably `test_padding_from_list` upstream).
        self._check_padding(numpify=A_ )

    def lowerCAmelCase_ ( self : Dict ):
        # Numpy-input padding test (presumably `test_padding_from_array` upstream).
        self._check_padding(numpify=A_ )

    def lowerCAmelCase_ ( self : str ):
        # List-input truncation test.
        self._check_truncation(numpify=A_ )

    def lowerCAmelCase_ ( self : List[Any] ):
        # Numpy-input truncation test.
        self._check_truncation(numpify=A_ )

    @require_torch
    def lowerCAmelCase_ ( self : Dict ):
        # Padded numpy and PyTorch tensors must agree (sums within 1e-2).
        _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
        _UpperCAmelCase = feat_extract.model_input_names[0]
        _UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""longest""" , return_tensors="""np""" )[input_name]
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""longest""" , return_tensors="""pt""" )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )

    @require_tf
    def lowerCAmelCase_ ( self : Optional[int] ):
        # Padded numpy and TensorFlow tensors must agree (sums within 1e-2).
        _UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
        _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
        _UpperCAmelCase = feat_extract.model_input_names[0]
        _UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""longest""" , return_tensors="""np""" )[input_name]
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""longest""" , return_tensors="""tf""" )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )

    def lowerCAmelCase_ ( self : str ):
        # With return_attention_mask enabled, `pad` must emit an attention_mask
        # whose shape matches the padded batch and whose row sums equal the
        # original (un-padded) input lengths.
        _UpperCAmelCase = self.feat_extract_dict
        _UpperCAmelCase = True
        _UpperCAmelCase = self.feature_extraction_class(**A_ )
        _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
        _UpperCAmelCase = [len(A_ ) for x in speech_inputs]
        _UpperCAmelCase = feat_extract.model_input_names[0]
        _UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        _UpperCAmelCase = feat_extract.pad(A_ , padding="""longest""" , return_tensors="""np""" )
        self.assertIn("""attention_mask""" , A_ )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , A_ )

    def lowerCAmelCase_ ( self : Optional[Any] ):
        # Same attention-mask contract, but under truncation to the shortest
        # input: every row of the mask should then sum to max_length.
        _UpperCAmelCase = self.feat_extract_dict
        _UpperCAmelCase = True
        _UpperCAmelCase = self.feature_extraction_class(**A_ )
        _UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
        _UpperCAmelCase = [len(A_ ) for x in speech_inputs]
        _UpperCAmelCase = feat_extract.model_input_names[0]
        _UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
        _UpperCAmelCase = min(A_ )
        _UpperCAmelCase = feat_extract.pad(
            A_ , padding="""max_length""" , max_length=A_ , truncation=A_ , return_tensors="""np""" )
        self.assertIn("""attention_mask""" , A_ )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
289
# Integration tests for the `datasets` search backends (FaissIndex /
# ElasticSearchIndex) and for `Dataset.add_*_index`.
#
# NOTE(review): obfuscation damage, kept byte-for-byte below:
#   * the file does NOT parse: the `dset.map(lambda A_ , A_ : ...)` call uses
#     duplicate lambda argument names, a SyntaxError;
#   * all three test classes are named `A__` and every test method is named
#     `__UpperCamelCase`, so later definitions shadow earlier ones;
#   * locals are assigned to `UpperCamelCase` but read back under their
#     original names (`dset`, `index`, `scores`, `indices`, ...), and most
#     call arguments were replaced by the undefined name `A_`;
#   * the base class `__snake_case` is unresolved — presumably `TestCase`,
#     TODO confirm against the un-obfuscated upstream file.
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pytest

from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex

from .utils import require_elasticsearch, require_faiss

# mark every test in this module as an integration test
__lowerCamelCase : Union[str, Any] = pytest.mark.integration


@require_faiss
class A__ ( __snake_case ):
    def __UpperCamelCase( self ):
        """Build a 30-row dummy dataset with a single "filename" column."""
        UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} )
        return dset

    def __UpperCamelCase( self ):
        """Add a faiss index over mapped vectors and query the nearest example."""
        import faiss

        UpperCamelCase : Dataset = self._create_dummy_dataset()
        UpperCamelCase : List[Any] = dset.map(
            lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ )
        UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
        # row 29 has the largest inner product with the all-ones query
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )
        dset.drop_index("vecs" )

    def __UpperCamelCase( self ):
        """Add a faiss index from externally supplied arrays and query it."""
        import faiss

        UpperCamelCase : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def __UpperCamelCase( self ):
        """Round-trip a dataset faiss index through save_faiss_index/load_faiss_index."""
        import faiss

        UpperCamelCase : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
            dset.save_faiss_index("vecs" , tmp_file.name )
            dset.load_faiss_index("vecs2" , tmp_file.name )
        os.unlink(tmp_file.name )
        UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) )
        self.assertEqual(examples["filename"][0] , "my_name-train_29" )

    def __UpperCamelCase( self ):
        """Querying a dropped index must raise (presumably MissingIndex — TODO confirm)."""
        UpperCamelCase : Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" )
        dset.drop_index("vecs" )
        self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) )

    def __UpperCamelCase( self ):
        """Add an elasticsearch index using a fully mocked ES client."""
        from elasticsearch import Elasticsearch

        UpperCamelCase : Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            UpperCamelCase : List[str] = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30 )
            UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            UpperCamelCase : Optional[Any] = Elasticsearch()
            dset.add_elasticsearch_index("filename" , es_client=A_ )
            UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" )
            self.assertEqual(examples["filename"][0] , "my_name-train_29" )


@require_faiss
class A__ ( __snake_case ):
    def __UpperCamelCase( self ):
        """Exercise FaissIndex add_vectors / search / search_batch basics."""
        import faiss

        UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa )
        UpperCamelCase : Optional[Any] = 1
        UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ )
        self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1]
        UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ )
        self.assertRaises(A_ , index.search_batch , queries[0] )
        UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores]
        UpperCamelCase : Tuple = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(A_ ) , 0 )
        # reversed identity queries hit rows 4..0 in order
        self.assertListEqual([4, 3, 2, 1, 0] , A_ )

    def __UpperCamelCase( self ):
        """Build indexes from faiss string factories; factory + custom_index must conflict."""
        import faiss

        UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(A_ ):
            UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) )

    def __UpperCamelCase( self ):
        """Wrap a user-provided faiss index object."""
        import faiss

        UpperCamelCase : Dict = faiss.IndexFlat(5 )
        UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=A_ )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )

    def __UpperCamelCase( self ):
        """Round-trip a FaissIndex through save/load on a temporary file."""
        import faiss

        UpperCamelCase : str = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.floataa ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file:
            index.save(tmp_file.name )
            UpperCamelCase : int = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        UpperCamelCase : str = np.zeros(5 , dtype=np.floataa )
        UpperCamelCase : int = 1
        UpperCamelCase , UpperCamelCase : Dict = index.search(A_ )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )


@require_faiss
def A_ ( _lowerCAmelCase ) -> Optional[int]:
    # Save/load a FaissIndex through an fsspec "mock://" filesystem; the
    # parameter is presumably a pytest `mockfs` fixture — TODO confirm.
    import faiss

    UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )
    UpperCamelCase : List[Any] = "index.faiss"
    UpperCamelCase : List[str] = F"""mock://{index_name}"""
    index.save(_lowerCAmelCase , storage_options=mockfs.storage_options )
    UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options )
    UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa )
    UpperCamelCase : Optional[int] = 1
    UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase )
    assert scores[0] > 0
    assert indices[0] == 1


@require_elasticsearch
class A__ ( __snake_case ):
    def __UpperCamelCase( self ):
        """Exercise ElasticSearchIndex search/search_batch against a mocked client."""
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
            UpperCamelCase : List[str] = Elasticsearch()
            UpperCamelCase : Union[str, Any] = {"acknowledged": True}
            UpperCamelCase : Union[str, Any] = ElasticSearchIndex(es_client=A_ )
            mocked_bulk.return_value([(True, None)] * 3 )
            index.add_documents(["foo", "bar", "foobar"] )
            # single query
            UpperCamelCase : str = "foo"
            UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            UpperCamelCase : Dict = "foo"
            UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            UpperCamelCase : Dict = ["foo", "bar", "foobar"]
            UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ )
            UpperCamelCase : str = [scores[0] for scores in total_scores]
            UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(A_ ) , 0 )
            self.assertListEqual([1, 1, 1] , A_ )
            # batched queries with timeout
            UpperCamelCase : int = ["foo", "bar", "foobar"]
            UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , request_timeout=30 )
            UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores]
            UpperCamelCase : Dict = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(A_ ) , 0 )
            self.assertListEqual([1, 1, 1] , A_ )
52
0
"""Reverse every "long" word (more than four characters) in a sentence."""


def reverse_long_words(sentence: str) -> str:
    """Return *sentence* with each word longer than four characters reversed.

    Words are whitespace-separated; shorter words pass through unchanged.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )


# Backward-compatible alias: the obfuscated original exposed the function
# under this name, but its body referenced undefined variables and the
# `__main__` block called `reverse_long_words`, so neither name worked.
lowercase__ = reverse_long_words


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
251
def solution(length: int = 50) -> int:
    """Project Euler 116: count the ways to replace grey tiles in a row of
    *length* units with red (length 2), green (length 3) or blue (length 4)
    tiles, using at least one tile and never mixing colours.

    ``ways[n][c]`` holds the number of non-empty tilings of a row of length
    ``n`` using only tiles of length ``c + 2``.

    >>> solution(5)
    12
    """
    ways = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):  # tile sizes 2 (red), 3 (green), 4 (blue)
            for tile_start in range(row_length - tile_length + 1):
                # Place the *first* tile at offset tile_start; the remaining
                # suffix (length row_length - tile_start - tile_length) is
                # tiled in ways[...] further ways, plus the single arrangement
                # with no further tiles (the "+ 1").
                ways[row_length][tile_length - 2] += (
                    ways[row_length - tile_start - tile_length][tile_length - 2] + 1
                )
    return sum(ways[length])


# Backward-compatible alias: the obfuscated original exposed this function as
# `A_` while its body and the `__main__` block referenced the undefined names
# restored above (`length`, `different_colour_ways_number`, `solution`).
A_ = solution


if __name__ == "__main__":
    print(f"""{solution() = }""")
52
0
'''simple docstring'''

# Extra command-line arguments for the seq2seq fine-tuning scripts, layered on
# top of `transformers.TrainingArguments`.
#
# NOTE(review): obfuscation damage, kept byte-for-byte below:
#   * every field is named `lowercase`, so each class attribute overwrites the
#     previous one and the dataclass ends up with a single field;
#   * several defaults reference the undefined name `__snake_case` (the
#     original values — presumably `None` or `False` per field — were lost);
#   * the base class `__snake_case` is unresolved; `TrainingArguments` is
#     imported and is presumably the intended base — TODO confirm upstream.
# The `metadata['help']` strings still document each field's intent:
# label smoothing epsilon, sortish sampler toggle, predict-with-generate
# toggle, adafactor toggle, encoder/decoder layerdrop, dropout, attention
# dropout, and the learning-rate scheduler choice (validated against
# `arg_to_scheduler`).

import logging
from dataclasses import dataclass, field
from typing import Optional

from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments


a : Optional[Any] = logging.getLogger(__name__)


@dataclass
class UpperCamelCase_ ( __snake_case ):
    # The label smoothing epsilon to apply (if not zero).
    lowercase = field(
        default=0.0 , metadata={'help': 'The label smoothing epsilon to apply (if not zero).'} )
    # Whether to use the sortish sampler.
    lowercase = field(default=__snake_case , metadata={'help': 'Whether to SortishSamler or not.'} )
    # Whether to use generate() to compute generative metrics (ROUGE, BLEU).
    lowercase = field(
        default=__snake_case , metadata={'help': 'Whether to use generate to calculate generative metrics (ROUGE, BLEU).'} )
    # Whether to use the Adafactor optimizer.
    lowercase = field(default=__snake_case , metadata={'help': 'whether to use adafactor'} )
    # Encoder layer dropout probability; copied into model.config.
    lowercase = field(
        default=__snake_case , metadata={'help': 'Encoder layer dropout probability. Goes into model.config.'} )
    # Decoder layer dropout probability; copied into model.config.
    lowercase = field(
        default=__snake_case , metadata={'help': 'Decoder layer dropout probability. Goes into model.config.'} )
    # Dropout probability; copied into model.config.
    lowercase = field(default=__snake_case , metadata={'help': 'Dropout probability. Goes into model.config.'} )
    # Attention dropout probability; copied into model.config.
    lowercase = field(
        default=__snake_case , metadata={'help': 'Attention dropout probability. Goes into model.config.'} )
    # Which learning-rate scheduler to use (one of arg_to_scheduler's keys).
    lowercase = field(
        default='linear' , metadata={'help': F"""Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"""} , )
265
def remove_duplicates(key: str) -> str:
    """Return *key* with repeated letters removed, keeping first occurrences.

    Spaces are always kept; any non-alphabetic character other than a space
    is dropped.

    >>> remove_duplicates("Hello World!!")
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        # Precedence: spaces are always kept; letters only on first occurrence;
        # everything else ("!", digits, ...) is dropped.
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a keyword-cipher substitution map (plain letter -> cipher letter).

    The deduplicated, upper-cased keyword fills the first positions of the
    cipher alphabet; the remaining letters follow in alphabetical order,
    skipping letters already used by the keyword.
    """
    alphabet = [chr(i + 65) for i in range(26)]  # "A".."Z"
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encode *message* with *cipher_map*; unmapped characters pass through.

    >>> encipher("Hello World!!", create_cipher_map("Goodbye!!"))
    'CYJJM VMQJB!!'
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decode *message* using the inverse of *cipher_map*.

    >>> decipher("CYJJM VMQJB!!", create_cipher_map("Goodbye!!"))
    'HELLO WORLD!!'
    """
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactive entry point: prompt for a message/keyword and print the result."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
52
0
"""Convert DeiT checkpoints from the timm library to the Hugging Face format."""


import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    """Return (timm_key, hf_key) pairs mapping timm parameter names to HF names.

    With ``base_model=True`` the classification heads are omitted and the
    leading ``deit.`` prefix is stripped from the target names.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate HF query/key/value tensors.

    Mutates *state_dict* in place.
    """
    for i in range(config.num_hidden_layers):
        prefix = "" if base_model else "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` (in place)."""
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    """Download and return the standard COCO cats image used to verify conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Convert a timm DeiT checkpoint named *deit_name* to the HF format,
    verify its logits against the timm model, and save model + image
    processor to *pytorch_dump_folder_path*.
    """
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.loads(Path(hf_hub_download(repo_id, filename, repo_type="dataset")).read_text())
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    # timm names encode geometry, e.g. "...patch16_224" -> patch 16, image 224
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass  # DeiTConfig defaults already describe the base architecture
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--deit_name",
        default="vit_deit_base_distilled_patch16_224",
        type=str,
        help="Name of the DeiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
256
# F1 metric exposed through the HuggingFace `datasets` metric API; the actual
# computation is delegated to scikit-learn.
from sklearn.metrics import fa_score  # NOTE(review): sklearn's public name is `f1_score`; confirm this alias resolves.

import datasets


# Short description surfaced by `datasets` for this metric.
__lowerCamelCase : List[Any] = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""

# Argument / return / example documentation injected into the class docstring below.
# NOTE(review): this rebinds `__lowerCamelCase`, clobbering the description above,
# and the decorator below reads `_DESCRIPTION` / `_KWARGS_DESCRIPTION`, which are
# not defined under those names here — identifiers look mangled; verify.
__lowerCamelCase : List[Any] = """
Args:
    predictions (`list` of `int`): Predicted labels.
    references (`list` of `int`): Ground truth labels.
    labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
    pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
    average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.

        - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
        - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
        - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
        - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters 'macro' to account for label imbalance. This option can result in an F-score that is not between precision and recall.
        - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
    sample_weight (`list` of `float`): Sample weights Defaults to None.

Returns:
    f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.

Examples:

    Example 1-A simple binary example
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
        >>> print(results)
        {'f1': 0.5}

    Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
        >>> print(round(results['f1'], 2))
        0.67

    Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
        >>> f1_metric = datasets.load_metric(\"f1\")
        >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
        >>> print(round(results['f1'], 2))
        0.35

    Example 4-A multiclass example, with different values for the `average` input.
        >>> predictions = [0, 2, 1, 0, 0, 1]
        >>> references = [0, 1, 2, 0, 1, 2]
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
        >>> print(round(results['f1'], 2))
        0.33
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
        >>> print(round(results['f1'], 2))
        0.27
        >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
        >>> print(results)
        {'f1': array([0.8, 0. , 0. ])}
"""

# BibTeX citation for scikit-learn.
__lowerCamelCase : str = """
@article{scikit-learn,
  title={Scikit-learn: Machine Learning in {P}ython},
  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
  journal={Journal of Machine Learning Research},
  volume={12},
  pages={2825--2830},
  year={2011}
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A__ ( datasets.Metric ):
    # NOTE(review): both methods below share the name `__UpperCamelCase`
    # (originally `_info` / `_compute`), so the second definition shadows the
    # first in the class namespace — verify against the unmangled source.

    def __UpperCamelCase( self ):
        """Declare the metric schema: int32 labels, sequences in multilabel mode."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32" ) ),
                    "references": datasets.Sequence(datasets.Value("int32" ) ),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32" ),
                    "references": datasets.Value("int32" ),
                } ) ,
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] ,
        )

    def __UpperCamelCase( self , A_ , A_ , A_=None , A_=1 , A_="binary" , A_=None ):
        """Delegate to sklearn's F1 and wrap the result as {"f1": scalar-or-array}."""
        UpperCamelCase : List[str] = fa_score(
            A_ , A_ , labels=A_ , pos_label=A_ , average=A_ , sample_weight=A_ )
        # NOTE(review): the score is bound to `UpperCamelCase` above but read back
        # as `score`, and `float(A_)` references the last parameter — names look
        # mangled; verify.
        return {"f1": float(A_ ) if score.size == 1 else score}
52
0
"""simple docstring"""
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


_snake_case : Optional[Any] = logging.get_logger(__name__)  # pylint: disable=invalid-name


def A__ ( UpperCamelCase ):
    """Infer the pipeline data format from a file path's extension.

    Returns "pipe" for an empty path, otherwise the first matching entry of
    PipelineDataFormat.SUPPORTED_FORMATS; raises when nothing matches.
    """
    # NOTE(review): the parameter is named `UpperCamelCase` but the body reads
    # `path` and passes `_lowerCAmelCase` to `endswith` — identifiers look
    # mangled; verify against the unmangled source.
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(_lowerCAmelCase ):
            return ext
    raise Exception(
        F"Unable to determine file format from file extension {path}. "
        F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )


def A__ ( UpperCamelCase ):
    """Factory for the `run` subcommand: build the pipeline and data reader.

    NOTE(review): redefines module-level `A__` (shadowing the helper above),
    reads `args` instead of its parameter, and calls
    `try_infer_format_from_ext`, which is not defined under that name in this
    module — verify.
    """
    A = pipeline(
        task=args.task ,
        model=args.model if args.model else None ,
        config=args.config ,
        tokenizer=args.tokenizer ,
        device=args.device ,
    )
    # Fall back to extension sniffing when the user asked for "infer".
    A = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
    A = PipelineDataFormat.from_str(
        format=_lowerCAmelCase ,
        output_path=args.output ,
        input_path=args.input ,
        column=args.column if args.column else nlp.default_input_names ,
        overwrite=args.overwrite ,
    )
    return RunCommand(_lowerCAmelCase , _lowerCAmelCase )


class _UpperCAmelCase ( __snake_case ):
    # CLI command that runs a pipeline over an input file/stream and saves the
    # results.
    # NOTE(review): the base class `__snake_case` is not defined in this module
    # (the module-level `_snake_case` is the logger), and the two `lowerCamelCase`
    # methods below shadow one another — verify.

    def __init__( self :Optional[int] , __UpperCamelCase :Union[str, Any] , __UpperCamelCase :Any ):
        # Keep the pipeline (`nlp`) and the data reader for `run`.
        # NOTE(review): both assignments target the bare name `A` — mangled.
        A = nlp
        A = reader

    @staticmethod
    def lowerCamelCase ( __UpperCamelCase :List[str] ):
        """Register the `run` subparser and all of its CLI arguments."""
        A = parser.add_parser("run" , help="Run a pipeline through the CLI" )
        run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
        run_parser.add_argument("--input" , type=A_ , help="Path to the file to use for inference" )
        run_parser.add_argument("--output" , type=A_ , help="Path to the file that will be used post to write results." )
        run_parser.add_argument("--model" , type=A_ , help="Name or path to the model to instantiate." )
        run_parser.add_argument("--config" , type=A_ , help="Name or path to the model's config to instantiate." )
        run_parser.add_argument(
            "--tokenizer" , type=A_ , help="Name of the tokenizer to use. (default: same as the model name)" )
        run_parser.add_argument(
            "--column" ,
            type=A_ ,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" ,
        )
        run_parser.add_argument(
            "--format" ,
            type=A_ ,
            default="infer" ,
            choices=PipelineDataFormat.SUPPORTED_FORMATS ,
            help="Input format to read from" ,
        )
        run_parser.add_argument(
            "--device" ,
            type=A_ ,
            default=-1 ,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" ,
        )
        run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
        run_parser.set_defaults(func=A_ )

    def lowerCamelCase ( self :str ):
        """Run the pipeline over every entry from the reader, then save outputs."""
        A = self._nlp, []
        for entry in self._reader:
            # Multi-column readers pass keyword arguments; single-column pass one value.
            A = nlp(**A_ ) if self._reader.is_multi_columns else nlp(A_ )
            if isinstance(A_ , A_ ):
                outputs.append(A_ )
            else:
                outputs += output

        # Binary-output pipelines are pickled via save_binary; others use the
        # reader's native format.
        if self._nlp.binary_output:
            A = self._reader.save_binary(A_ )
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}" )
        else:
            self._reader.save(A_ )
292
import gc
import random
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast

from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


enable_full_determinism()


class A__ ( __snake_case , unittest.TestCase ):
    # Fast (CPU-sized) tests for the Kandinsky inpainting pipeline.
    # NOTE(review): the base `__snake_case` is not defined in this module
    # (presumably PipelineTesterMixin), and every property/method below shares
    # the name `__UpperCamelCase`, so each later definition shadows the earlier
    # ones — identifiers look mangled; verify.

    _UpperCAmelCase :List[str] = KandinskyInpaintPipeline
    _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
    _UpperCAmelCase :Dict = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
        'mask_image',
    ]
    _UpperCAmelCase :Optional[int] = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    _UpperCAmelCase :int = False

    @property
    def __UpperCamelCase( self ):
        return 32

    @property
    def __UpperCamelCase( self ):
        return 32

    @property
    def __UpperCamelCase( self ):
        return self.time_input_dim

    @property
    def __UpperCamelCase( self ):
        return self.time_input_dim * 4

    @property
    def __UpperCamelCase( self ):
        return 100

    @property
    def __UpperCamelCase( self ):
        """Tiny multilingual CLIP tokenizer used by the fast tests."""
        UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
        return tokenizer

    @property
    def __UpperCamelCase( self ):
        """Tiny MultilingualCLIP text encoder, deterministically seeded."""
        torch.manual_seed(0 )
        UpperCamelCase : Optional[int] = MCLIPConfig(
            numDims=self.cross_attention_dim ,
            transformerDimensions=self.text_embedder_hidden_size ,
            hidden_size=self.text_embedder_hidden_size ,
            intermediate_size=37 ,
            num_attention_heads=4 ,
            num_hidden_layers=5 ,
            vocab_size=1005 , )
        UpperCamelCase : Optional[int] = MultilingualCLIP(A_ )
        UpperCamelCase : Union[str, Any] = text_encoder.eval()
        return text_encoder

    @property
    def __UpperCamelCase( self ):
        """Small UNet configured for inpainting (9 input channels)."""
        torch.manual_seed(0 )
        UpperCamelCase : Optional[int] = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ )
        return model

    @property
    def __UpperCamelCase( self ):
        # Keyword arguments for the dummy MoVQ VQ model built below.
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def __UpperCamelCase( self ):
        torch.manual_seed(0 )
        UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs )
        return model

    def __UpperCamelCase( self ):
        """Assemble the full component dict for the pipeline from the dummies."""
        UpperCamelCase : Any = self.dummy_text_encoder
        UpperCamelCase : str = self.dummy_tokenizer
        UpperCamelCase : List[Any] = self.dummy_unet
        UpperCamelCase : Optional[Any] = self.dummy_movq

        UpperCamelCase : Union[str, Any] = DDIMScheduler(
            num_train_timesteps=1000 ,
            beta_schedule="linear" ,
            beta_start=0.0_00_85 ,
            beta_end=0.0_12 ,
            clip_sample=A_ ,
            set_alpha_to_one=A_ ,
            steps_offset=1 ,
            prediction_type="epsilon" ,
            thresholding=A_ , )

        UpperCamelCase : Optional[Any] = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def __UpperCamelCase( self , A_ , A_=0 ):
        """Build deterministic dummy inputs: embeddings, a 64x64 image, a mask."""
        UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
        # create init_image
        UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
        UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) )
        # create mask
        UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa )
        UpperCamelCase : str = 0

        # MPS requires a default-device generator; elsewhere bind to the device.
        if str(A_ ).startswith("mps" ):
            UpperCamelCase : int = torch.manual_seed(A_ )
        else:
            UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ )
        UpperCamelCase : Union[str, Any] = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def __UpperCamelCase( self ):
        """End-to-end CPU smoke test against a fixed expected output slice."""
        UpperCamelCase : Optional[int] = "cpu"

        UpperCamelCase : Tuple = self.get_dummy_components()

        UpperCamelCase : str = self.pipeline_class(**A_ )
        UpperCamelCase : Tuple = pipe.to(A_ )

        pipe.set_progress_bar_config(disable=A_ )

        UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) )
        UpperCamelCase : List[Any] = output.images

        # Same call but unpacking the tuple return instead of the dataclass.
        UpperCamelCase : List[Any] = pipe(
            **self.get_dummy_inputs(A_ ) ,
            return_dict=A_ ,
        )[0]

        UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
        UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]

        print(F"""image.shape {image.shape}""" )

        assert image.shape == (1, 64, 64, 3)

        UpperCamelCase : Union[str, Any] = np.array(
            [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] )

        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""

    def __UpperCamelCase( self ):
        # Batched vs. single inference should agree within a small tolerance.
        super().test_inference_batch_single_identical(expected_max_diff=3e-3 )


@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    # Slow GPU integration test against reference outputs stored on the Hub.
    # NOTE(review): redefines module-level `A__`, shadowing the fast test class.

    def __UpperCamelCase( self ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __UpperCamelCase( self ):
        """Full-resolution inpainting run compared against a stored reference."""
        UpperCamelCase : Optional[Any] = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )

        UpperCamelCase : List[str] = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
        UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
        UpperCamelCase : str = 0

        UpperCamelCase : List[Any] = "a hat"

        UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
        pipe_prior.to(A_ )

        UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
        UpperCamelCase : Optional[Any] = pipeline.to(A_ )
        pipeline.set_progress_bar_config(disable=A_ )

        UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
        # NOTE(review): annotated tuple-unpack target below is not valid Python —
        # the annotation looks like a mangling artifact; verify.
        UpperCamelCase , UpperCamelCase : Optional[Any] = pipe_prior(
            A_ ,
            generator=A_ ,
            num_inference_steps=5 ,
            negative_prompt="" ,
        ).to_tuple()

        UpperCamelCase : Dict = pipeline(
            A_ ,
            image=A_ ,
            mask_image=A_ ,
            image_embeds=A_ ,
            negative_image_embeds=A_ ,
            generator=A_ ,
            num_inference_steps=100 ,
            height=768 ,
            width=768 ,
            output_type="np" , )

        UpperCamelCase : List[str] = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(A_ , A_ )
52
0
'''simple docstring'''
# Imports
import numpy as np


class A__ :
    """Compute spectral vegetation indices from per-band image matrices.

    Bands (red, green, blue, red_edge, nir) are stored on the instance and
    all index methods operate element-wise on them (numpy arrays or scalars).
    NOTE(review): every method except `__init__` is named `_lowerCAmelCase`,
    so later definitions shadow earlier ones, and the dispatch table in the
    calculation method references attribute names (`self.ndvi`, ...) that do
    not exist under the mangled names — verify against the unmangled source.
    The index names in the docstrings below are taken from that dispatch
    table, matched by position.
    """

    def __init__( self : int , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Optional[int]=None , lowerCAmelCase__ : Dict=None ) -> List[str]:
        """Store the optional band matrices via set_matricies."""
        self.set_matricies(red=A_ , green=A_ , blue=A_ , red_edge=A_ , nir=A_ )

    def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : int=None , lowerCAmelCase__ : List[Any]=None ) -> Optional[Any]:
        """Update any subset of the band matrices; None leaves a band untouched."""
        if red is not None:
            _UpperCAmelCase : Tuple = red
        if green is not None:
            _UpperCAmelCase : List[str] = green
        if blue is not None:
            _UpperCAmelCase : Tuple = blue
        if red_edge is not None:
            _UpperCAmelCase : List[Any] = red_edge
        if nir is not None:
            _UpperCAmelCase : Dict = nir
        return True

    def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Dict="" , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : str=None ) -> Optional[int]:
        """Dispatch an index computation by name; False and a message on miss."""
        self.set_matricies(red=A_ , green=A_ , blue=A_ , red_edge=A_ , nir=A_ )
        _UpperCAmelCase : Any = {
            "ARVI2": self.arvaa,
            "CCCI": self.ccci,
            "CVI": self.cvi,
            "GLI": self.gli,
            "NDVI": self.ndvi,
            "BNDVI": self.bndvi,
            "redEdgeNDVI": self.red_edge_ndvi,
            "GNDVI": self.gndvi,
            "GBNDVI": self.gbndvi,
            "GRNDVI": self.grndvi,
            "RBNDVI": self.rbndvi,
            "PNDVI": self.pndvi,
            "ATSAVI": self.atsavi,
            "BWDRVI": self.bwdrvi,
            "CIgreen": self.ci_green,
            "CIrededge": self.ci_rededge,
            "CI": self.ci,
            "CTVI": self.ctvi,
            "GDVI": self.gdvi,
            "EVI": self.evi,
            "GEMI": self.gemi,
            "GOSAVI": self.gosavi,
            "GSAVI": self.gsavi,
            "Hue": self.hue,
            "IVI": self.ivi,
            "IPVI": self.ipvi,
            "I": self.i,
            "RVI": self.rvi,
            "MRVI": self.mrvi,
            "MSAVI": self.m_savi,
            "NormG": self.norm_g,
            "NormNIR": self.norm_nir,
            "NormR": self.norm_r,
            "NGRDI": self.ngrdi,
            "RI": self.ri,
            "S": self.s,
            "IF": self._if,
            "DVI": self.dvi,
            "TVI": self.tvi,
            "NDRE": self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print("Index not in the list!" )
            return False

    def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
        """ARVI2 (Atmospherically Resistant Vegetation Index 2)."""
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def _lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
        """CCCI (Canopy Chlorophyll Content Index)."""
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def _lowerCAmelCase ( self : Optional[int] ) -> int:
        """CVI (Chlorophyll Vegetation Index)."""
        return self.nir * (self.red / (self.green**2))

    def _lowerCAmelCase ( self : str ) -> Optional[int]:
        """GLI (Green Leaf Index)."""
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def _lowerCAmelCase ( self : int ) -> int:
        """NDVI (Normalized Difference Vegetation Index)."""
        return (self.nir - self.red) / (self.nir + self.red)

    def _lowerCAmelCase ( self : List[Any] ) -> int:
        """BNDVI (blue-band NDVI)."""
        return (self.nir - self.blue) / (self.nir + self.blue)

    def _lowerCAmelCase ( self : List[Any] ) -> int:
        """redEdgeNDVI (red-edge NDVI)."""
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def _lowerCAmelCase ( self : str ) -> Optional[int]:
        """GNDVI (green-band NDVI)."""
        return (self.nir - self.green) / (self.nir + self.green)

    def _lowerCAmelCase ( self : str ) -> Optional[int]:
        """GBNDVI (green-blue NDVI)."""
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def _lowerCAmelCase ( self : Dict ) -> List[Any]:
        """GRNDVI (green-red NDVI)."""
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def _lowerCAmelCase ( self : Optional[int] ) -> Dict:
        """RBNDVI (red-blue NDVI)."""
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
        """PNDVI (pan NDVI over all visible bands)."""
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : Optional[int]=0.08 , lowerCAmelCase__ : List[Any]=1.22 , lowerCAmelCase__ : List[str]=0.03 ) -> List[str]:
        """ATSAVI (Adjusted Transformed Soil-Adjusted VI) with soil-line a, b, x."""
        # NOTE(review): body reads a/b/x but parameters are mangled names — verify.
        return a * (
            (self.nir - a * self.red - b) / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def _lowerCAmelCase ( self : Any ) -> List[str]:
        """BWDRVI (Blue Wide Dynamic Range Vegetation Index)."""
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def _lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
        """CIgreen (green chlorophyll index)."""
        return (self.nir / self.green) - 1

    def _lowerCAmelCase ( self : Dict ) -> str:
        """CIrededge (red-edge chlorophyll index)."""
        return (self.nir / self.redEdge) - 1

    def _lowerCAmelCase ( self : str ) -> int:
        """CI (Coloration Index)."""
        return (self.red - self.blue) / self.red

    def _lowerCAmelCase ( self : int ) -> Dict:
        """CTVI (Corrected Transformed Vegetation Index), derived from NDVI."""
        _UpperCAmelCase : List[Any] = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))

    def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        """GDVI (green difference vegetation index)."""
        return self.nir - self.green

    def _lowerCAmelCase ( self : Dict ) -> Any:
        """EVI (Enhanced Vegetation Index)."""
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
        """GEMI (Global Environment Monitoring Index)."""
        _UpperCAmelCase : Union[str, Any] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : List[Any]=0.16 ) -> Dict:
        """GOSAVI (Green Optimized Soil-Adjusted VI) with soil factor y."""
        return (self.nir - self.green) / (self.nir + self.green + y)

    def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : Optional[Any]=0.5 ) -> Any:
        """GSAVI (Green Soil-Adjusted VI) with soil factor n."""
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
        """Hue of the RGB bands."""
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue)
        )

    def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Optional[int]=None ) -> str:
        """IVI (Ideal Vegetation Index) with intercept b and slope a."""
        return (self.nir - b) / (a * self.red)

    def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        """IPVI (Infrared Percentage Vegetation Index)."""
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def _lowerCAmelCase ( self : Optional[int] ) -> str:
        """I (intensity of the RGB bands)."""
        return (self.red + self.green + self.blue) / 30.5

    def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
        """RVI (Ratio Vegetation Index)."""
        return self.nir / self.red

    def _lowerCAmelCase ( self : int ) -> List[Any]:
        """MRVI (Modified RVI)."""
        return (self.rvi() - 1) / (self.rvi() + 1)

    def _lowerCAmelCase ( self : Optional[Any] ) -> int:
        """MSAVI (Modified Soil-Adjusted Vegetation Index)."""
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def _lowerCAmelCase ( self : int ) -> List[Any]:
        """NormG (normalized green)."""
        return self.green / (self.nir + self.red + self.green)

    def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        """NormNIR (normalized near-infrared)."""
        return self.nir / (self.nir + self.red + self.green)

    def _lowerCAmelCase ( self : Tuple ) -> Tuple:
        """NormR (normalized red)."""
        return self.red / (self.nir + self.red + self.green)

    def _lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
        """NGRDI (Normalized Green-Red Difference Index)."""
        return (self.green - self.red) / (self.green + self.red)

    def _lowerCAmelCase ( self : List[str] ) -> List[str]:
        """RI (Redness Index)."""
        return (self.red - self.green) / (self.red + self.green)

    def _lowerCAmelCase ( self : List[Any] ) -> str:
        """S (saturation of the RGB bands)."""
        _UpperCAmelCase : List[Any] = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        _UpperCAmelCase : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value

    def _lowerCAmelCase ( self : str ) -> Tuple:
        """IF (shape index of the RGB bands)."""
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def _lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        """DVI; NOTE(review): formula shown is a NIR/red ratio, same as RVI — verify."""
        return self.nir / self.red

    def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
        """TVI (Transformed Vegetation Index)."""
        return (self.ndvi() + 0.5) ** (1 / 2)

    def _lowerCAmelCase ( self : Tuple ) -> List[str]:
        """NDRE (Normalized Difference Red Edge)."""
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
145
class A__ :
    # Disjoint-set (union-find) over elements with initial per-set sizes,
    # tracking the largest set size seen; union is by rank, with the larger
    # rank absorbing the smaller.
    # NOTE(review): both methods below are named `__UpperCamelCase` (merge /
    # get_parent in the original), so the second shadows the first, and many
    # computed values are bound to throwaway annotated locals where the
    # original presumably mutated self.ranks / self.parents / self.set_counts /
    # self.max_set — verify against the unmangled source.

    def __init__( self , A_ ):
        UpperCamelCase : Union[str, Any] = set_counts          # per-set element counts
        UpperCamelCase : int = max(A_ )                        # largest initial set
        UpperCamelCase : Optional[Any] = len(A_ )              # number of sets
        UpperCamelCase : Union[str, Any] = [1] * num_sets      # ranks start at 1
        UpperCamelCase : Union[str, Any] = list(range(A_ ) )   # each element its own parent

    def __UpperCamelCase( self , A_ , A_ ):
        """Union the sets containing src/dst; return False if already joined."""
        UpperCamelCase : Any = self.get_parent(A_ )
        UpperCamelCase : Optional[int] = self.get_parent(A_ )

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # Attach src's tree under dst's root.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            UpperCamelCase : int = 0
            UpperCamelCase : Dict = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            UpperCamelCase : Optional[int] = self.set_counts[dst_parent]
        else:
            # Attach dst's tree under src's root.
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            UpperCamelCase : Any = 0
            UpperCamelCase : Optional[int] = src_parent
            UpperCamelCase : int = self.set_counts[src_parent]

        # Track the largest set size observed so far.
        UpperCamelCase : Any = max(self.max_set , A_ )
        return True

    def __UpperCamelCase( self , A_ ):
        """Return the representative (root) of the set containing disj_set."""
        if self.parents[disj_set] == disj_set:
            return disj_set  # already the root
        # NOTE(review): the recursive result is discarded into a local, so path
        # compression never takes effect and the immediate parent (not the
        # root) is returned — verify intended assignment target.
        UpperCamelCase : Optional[int] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
52
0
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, TaTokenizer


# NOTE(review): every module-level function below is named `lowerCAmelCase__`,
# so each definition shadows the previous one, and bodies read identifiers
# (line, tokenizer, args, ...) that differ from the declared `_UpperCamelCase`
# parameters — identifiers look mangled; verify against the unmangled source.
def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : int=True , _UpperCamelCase : Optional[int]="pt" ) -> int:
    """Tokenize one text line to a fixed length (`encode_line` in the original)."""
    snake_case = {"add_prefix_space": True} if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not line.startswith(' ' ) else {}
    snake_case = padding_side
    return tokenizer(
        [line] ,
        max_length=_lowerCAmelCase ,
        padding='max_length' if pad_to_max_length else None ,
        truncation=_lowerCAmelCase ,
        return_tensors=_lowerCAmelCase ,
        add_special_tokens=_lowerCAmelCase ,
        **_lowerCAmelCase , )


def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : str , _UpperCamelCase : Any=None , ) -> str:
    """Drop columns that are all padding from a batch (`trim_batch`)."""
    snake_case = input_ids.ne(_lowerCAmelCase ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class lowerCAmelCase_ ( __snake_case ):
    """Line-aligned seq2seq dataset reading `<type_path>.source` / `.target`.

    NOTE(review): the base `__snake_case` is undefined here (presumably
    torch.utils.data.Dataset), the two `snake_case` methods shadow each other
    (`get_char_lens` / `collate_fn` in the original), and most assignments
    target the throwaway name `snake_case` instead of `self.*` — verify.
    """

    def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase="train" , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="" , ):
        super().__init__()
        snake_case = Path(A_ ).joinpath(type_path + '.source' )
        snake_case = Path(A_ ).joinpath(type_path + '.target' )
        snake_case = self.get_char_lens(self.src_file )
        snake_case = max_source_length
        snake_case = max_target_length
        assert min(self.src_lens ) > 0, F"""found empty line in {self.src_file}"""
        snake_case = tokenizer
        snake_case = prefix
        if n_obs is not None:
            # Truncate to the first n_obs examples.
            snake_case = self.src_lens[:n_obs]
        snake_case = src_lang
        snake_case = tgt_lang

    def __len__( self ):
        return len(self.src_lens )

    def __getitem__( self , lowerCAmelCase ):
        """Read one source/target line pair and tokenize both to fixed length."""
        snake_case = index + 1  # linecache starts at 1
        snake_case = self.prefix + linecache.getline(str(self.src_file ) , A_ ).rstrip('\n' )
        snake_case = linecache.getline(str(self.tgt_file ) , A_ ).rstrip('\n' )
        assert source_line, F"""empty source line for index {index}"""
        assert tgt_line, F"""empty tgt line for index {index}"""

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer , A_ ):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right; RAG tokenizers split into a
        # question encoder (source) and generator (target) side.
        snake_case = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer , A_ ) else self.tokenizer
        )
        snake_case = self.tokenizer.generator if isinstance(self.tokenizer , A_ ) else self.tokenizer

        snake_case = encode_line(A_ , A_ , self.max_source_length , 'right' )
        snake_case = encode_line(A_ , A_ , self.max_target_length , 'right' )

        snake_case = source_inputs["input_ids"].squeeze()
        snake_case = target_inputs["input_ids"].squeeze()
        snake_case = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def snake_case ( lowerCAmelCase ):
        """Return the character length of every line in a file."""
        return [len(A_ ) for x in Path(A_ ).open().readlines()]

    def snake_case ( self , lowerCAmelCase ):
        """Collate examples into padded, trimmed tensors for the model."""
        snake_case = torch.stack([x['input_ids'] for x in batch] )
        snake_case = torch.stack([x['attention_mask'] for x in batch] )
        snake_case = torch.stack([x['decoder_input_ids'] for x in batch] )
        snake_case = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer , A_ )
            else self.tokenizer.pad_token_id
        )
        snake_case = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer , A_ )
            else self.tokenizer.pad_token_id
        )
        snake_case = trim_batch(A_ , A_ )
        snake_case = trim_batch(A_ , A_ , attention_mask=A_ )
        snake_case = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


SCREAMING_SNAKE_CASE__ = getLogger(__name__)


def lowerCAmelCase__ ( _UpperCamelCase : List[Any] ) -> List[Any]:
    """Flatten one level of nesting (`flatten_list`)."""
    return list(itertools.chain.from_iterable(_lowerCAmelCase ) )


def lowerCAmelCase__ ( _UpperCamelCase : Any ) -> None:
    """Record repo state into <folder>/git_log.json (`save_git_info`)."""
    snake_case = get_git_info()
    save_json(_lowerCAmelCase , os.path.join(_lowerCAmelCase , 'git_log.json' ) )


def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str=4 , **_UpperCamelCase : Any ) -> List[str]:
    """Dump an object to a JSON file (`save_json`)."""
    with open(_lowerCAmelCase , 'w' ) as f:
        json.dump(_lowerCAmelCase , _lowerCAmelCase , indent=_lowerCAmelCase , **_lowerCAmelCase )


def lowerCAmelCase__ ( _UpperCamelCase : str ) -> Any:
    """Load a JSON file (`load_json`)."""
    with open(_lowerCAmelCase ) as f:
        return json.load(_lowerCAmelCase )


def lowerCAmelCase__ ( ) -> Any:
    """Return repo id/sha/branch/hostname for the enclosing git checkout."""
    snake_case = git.Repo(search_parent_directories=_lowerCAmelCase )
    snake_case = {
        "repo_id": str(_lowerCAmelCase ),
        "repo_sha": str(repo.head.object.hexsha ),
        "repo_branch": str(repo.active_branch ),
        "hostname": str(socket.gethostname() ),
    }
    return repo_infos


def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] ) -> List:
    """list(map(f, x)) convenience wrapper (`lmap`)."""
    return list(map(_lowerCAmelCase , _lowerCAmelCase ) )


def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Any ) -> List[Any]:
    """Pickle an object to disk (`pickle_save`)."""
    with open(_lowerCAmelCase , 'wb' ) as f:
        return pickle.dump(_lowerCAmelCase , _lowerCAmelCase )


def lowerCAmelCase__ ( _UpperCamelCase : int ) -> Union[str, Any]:
    """SQuAD-style answer normalization: lowercase, strip punctuation/articles."""
    def remove_articles(_UpperCamelCase : Any ):
        return re.sub(r'\b(a|an|the)\b' , ' ' , _lowerCAmelCase )

    def white_space_fix(_UpperCamelCase : List[str] ):
        return " ".join(text.split() )

    def remove_punc(_UpperCamelCase : str ):
        snake_case = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(_UpperCamelCase : int ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )


def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : Any ) -> List[Any]:
    """Token-level F1 between a prediction and a gold answer (`f1_score`)."""
    snake_case = normalize_answer(_lowerCAmelCase ).split()
    snake_case = normalize_answer(_lowerCAmelCase ).split()
    snake_case = Counter(_lowerCAmelCase ) & Counter(_lowerCAmelCase )
    snake_case = sum(common.values() )
    if num_same == 0:
        return 0
    snake_case = 1.0 * num_same / len(_lowerCAmelCase )
    snake_case = 1.0 * num_same / len(_lowerCAmelCase )
    snake_case = (2 * precision * recall) / (precision + recall)
    return fa


def lowerCAmelCase__ ( _UpperCamelCase : int , _UpperCamelCase : List[Any] ) -> Union[str, Any]:
    """Exact-match after normalization (`exact_match_score`)."""
    return normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase )


def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] ) -> Dict:
    """Mean exact-match over aligned output/answer lists (`calculate_exact_match`)."""
    assert len(_lowerCAmelCase ) == len(_lowerCAmelCase )
    snake_case = 0
    for hypo, pred in zip(_lowerCAmelCase , _lowerCAmelCase ):
        em += exact_match_score(_lowerCAmelCase , _lowerCAmelCase )
    if len(_lowerCAmelCase ) > 0:
        em /= len(_lowerCAmelCase )
    return {"em": em}


def lowerCAmelCase__ ( _UpperCamelCase : str ) -> str:
    """True when the model prefix denotes a RAG model (`is_rag_model`)."""
    return model_prefix.startswith('rag' )


def lowerCAmelCase__ ( _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : Dict ) -> int:
    """Move extra hparams onto the model config, honoring T5's renamed params."""
    snake_case = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    snake_case = "dropout_rate"
    for p in extra_params:
        if getattr(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
            # Skip (and warn about) params the config cannot accept under
            # either the original or the equivalent name.
            if not hasattr(_lowerCAmelCase , _lowerCAmelCase ) and not hasattr(_lowerCAmelCase , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(_lowerCAmelCase ) )
                delattr(_lowerCAmelCase , _lowerCAmelCase )
                continue
            snake_case = p if hasattr(_lowerCAmelCase , _lowerCAmelCase ) else equivalent_param[p]
            setattr(_lowerCAmelCase , _lowerCAmelCase , getattr(_lowerCAmelCase , _lowerCAmelCase ) )
            delattr(_lowerCAmelCase , _lowerCAmelCase )
    return hparams, config
150
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Import structure consumed by _LazyModule below.  The previous version bound
# every piece to a throwaway name and then passed the never-defined
# `_import_structure` to _LazyModule, raising NameError at import time.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so submodules are only imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
52
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) lowercase__ : str = { """configuration_owlvit""": [ """OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """OwlViTConfig""", """OwlViTOnnxConfig""", """OwlViTTextConfig""", """OwlViTVisionConfig""", ], """processing_owlvit""": ["""OwlViTProcessor"""], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Optional[Any] = ["""OwlViTFeatureExtractor"""] lowercase__ : Optional[int] = ["""OwlViTImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase__ : Union[str, Any] = [ """OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """OwlViTModel""", """OwlViTPreTrainedModel""", """OwlViTTextModel""", """OwlViTVisionModel""", """OwlViTForObjectDetection""", ] if TYPE_CHECKING: from .configuration_owlvit import ( OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, OwlViTConfig, OwlViTOnnxConfig, OwlViTTextConfig, OwlViTVisionConfig, ) from .processing_owlvit import OwlViTProcessor try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_owlvit import OwlViTFeatureExtractor from .image_processing_owlvit import OwlViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_owlvit import ( OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST, OwlViTForObjectDetection, OwlViTModel, OwlViTPreTrainedModel, OwlViTTextModel, OwlViTVisionModel, ) else: import sys lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
324
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCamelCase : str = logging.get_logger(__name__) __lowerCamelCase : str = { """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""", } class A__ ( __snake_case , __snake_case ): _UpperCAmelCase :Optional[int] = 'convnextv2' def __init__( self , A_=3 , A_=4 , A_=4 , A_=None , A_=None , A_="gelu" , A_=0.02 , A_=1e-12 , A_=0.0 , A_=224 , A_=None , A_=None , **A_ , ): '''simple docstring''' super().__init__(**A_ ) UpperCamelCase : Dict = num_channels UpperCamelCase : Union[str, Any] = patch_size UpperCamelCase : Union[str, Any] = num_stages UpperCamelCase : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes UpperCamelCase : List[str] = [3, 3, 9, 3] if depths is None else depths UpperCamelCase : Dict = hidden_act UpperCamelCase : Union[str, Any] = initializer_range UpperCamelCase : Tuple = layer_norm_eps UpperCamelCase : str = drop_path_rate UpperCamelCase : List[str] = image_size UpperCamelCase : List[str] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] UpperCamelCase , UpperCamelCase : str = get_aligned_output_features_output_indices( out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
52
0
import json
import os
import unittest

from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
    VOCAB_FILES_NAMES,
    GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for GPTSAN-japanese.

    Method names must start with `test` so unittest collects them; the
    previous version's obfuscated `UpperCamelCase` names were never run.
    """

    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab_tokens = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
        # fmt: on
        emoji_tokens = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}}  # 😀
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["emoji_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.emoji_file, "w") as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、㔺界。😀"
        output_text = "こんにちは、世界。 \nこんばんは、世界。😀"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、世界。 こんばんは、㔺界。"
        expected_token = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()

        # Testing tokenization
        input_text = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
        expected_text = "こんにちは、、、、世界。こんばんは、、、、世界。"
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        output_text = "こんにちは、世界。こんばんは、世界。😀"
        x_token_1 = tokenizer.encode(prefix_text + input_text)
        x_token_2 = tokenizer.encode("", prefix_text=prefix_text + input_text)
        x_token_3 = tokenizer.encode(input_text, prefix_text=prefix_text)
        output_1 = tokenizer.decode(x_token_1)
        output_2 = tokenizer.decode(x_token_2)
        output_3 = tokenizer.decode(x_token_3)
        self.assertEqual(output_1, output_text)
        self.assertEqual(output_2, output_text)
        self.assertEqual(output_3, output_text)

    @slow
    def test_token_type_ids(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")

        # Testing tokenization
        prefix_text = "こんにちは、世界。"
        input_text = "こんばんは、㔺界。😀"
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_1 = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_2 = tokenizer("", prefix_text=prefix_text + input_text).token_type_ids
        type_ids_3 = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_1, expected_mask_1)
        self.assertListEqual(type_ids_2, expected_mask_2)
        self.assertListEqual(type_ids_3, expected_mask_3)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        x_token_1 = tokenizer.encode("あンいワ")
        x_token_2 = tokenizer.encode("", prefix_text="あンいワ")
        x_token_3 = tokenizer.encode("いワ", prefix_text="あン")
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_2))
        self.assertEqual(tokenizer.decode(x_token_1), tokenizer.decode(x_token_3))
        self.assertNotEqual(x_token_1, x_token_2)
        self.assertNotEqual(x_token_1, x_token_3)
        self.assertEqual(x_token_2[1], x_token_2[-1])  # SEG token
        self.assertEqual(x_token_2[1], x_token_3[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese")
        input_pairs = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_2 = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_2.input_ids, expected_outputs)
        self.assertListEqual(x_token_2.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_2.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        pass

    def test_padding_different_model_input_name(self):
        pass
193
import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def A_ ( ) -> List[Any]: with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_lowerCAmelCase ): requests.request("GET" , "https://huggingface.co" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("GET" , "https://huggingface.co" , timeout=1.0 ) @pytest.mark.integration def A_ ( ) -> Tuple: with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("GET" , "https://huggingface.co" ) def A_ ( ) -> Optional[int]: with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_lowerCAmelCase ): http_head("https://huggingface.co" )
52
0
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    """Load CSV files into tf.data Datasets of (features, label) pairs.

    Returns (train_ds, val_ds, test_ds, label2id); any split whose file was
    not given is returned as None.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    # Single text column vs. sentence-pair input.
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int64 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results


if __name__ == "__main__":
    main()
305
from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCamelCase : Optional[int] = {"""configuration_mmbt""": ["""MMBTConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
0
import sys
from collections import defaultdict


class Heap:
    """Min-heap over vertex distances that also tracks each vertex's heap slot.

    The previous (obfuscated) version assigned all swap temporaries to a
    single reused name, leaving `temp`/`tempa` undefined and `set_position`
    writing to a local instead of `self.node_position` — every method below
    restores the intended behaviour.
    """

    def __init__(self):
        # node_position[v] = index of vertex v inside the heap arrays.
        self.node_position = []

    def get_position(self, vertex):
        """Return the heap index currently occupied by `vertex`."""
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        """Record that `vertex` now occupies heap index `pos`."""
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at `start` down until the min-heap property holds."""
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            # Swap keys and payloads, then keep node_position consistent.
            temp, temp_pos = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = heap[start], positions[start]
            heap[start], positions[start] = temp, temp_pos
            temp_idx = self.get_position(positions[smallest_child])
            self.set_position(positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp_idx)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Sift a decreased key `val` at `index` up to its proper slot."""
        moving_vertex = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = moving_vertex
                self.set_position(moving_vertex, index)
                break
            index = parent
        else:
            # Reached the root without breaking: place the value there.
            heap[0] = val
            position[0] = moving_vertex
            self.set_position(moving_vertex, 0)

    def heapify(self, heap, positions):
        """Build a min-heap in place over the parallel heap/positions arrays."""
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        """Pop and return the vertex with the minimum key."""
        minimum_vertex = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return minimum_vertex


def prisms_algorithm(adjacency_list):
    """Prim's minimum-spanning-tree algorithm, starting from vertex 0.

    `adjacency_list` maps each vertex to a list of [neighbor, weight] pairs.
    Returns the MST as a list of (parent, vertex) edges in selection order.
    """
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if visited[neighbor] == 0 and distance < distance_tv[heap.get_position(neighbor)]:
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
207
import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


_CITATION = """
@inproceedings{xu-etal-2016-optimizing,
title = {Optimizing Statistical Machine Translation for Text Simplification},
authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},
journal = {Transactions of the Association for Computational Linguistics},
volume = {4},
year={2016},
url = {https://www.aclweb.org/anthology/Q16-1029},
pages = {401--415},
@inproceedings{post-2018-call,
title = "A Call for Clarity in Reporting {BLEU} Scores",
author = "Post, Matt",
booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
month = oct,
year = "2018",
address = "Belgium, Brussels",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/W18-6319",
pages = "186--191",
}
"""

_DESCRIPTION = """\
WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU
It can be used to evaluate the quality of machine-generated texts.
"""

_KWARGS_DESCRIPTION = """
Calculates sari score (between 0 and 100) given a list of source and predicted
sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.
Args:
    sources: list of source sentences where each sentence should be a string.
    predictions: list of predicted sentences where each sentence should be a string.
    references: list of lists of reference sentences where each sentence should be a string.
Returns:
    sari: sari score
    sacrebleu: sacrebleu score
    exact: exact score
Examples:
    >>> sources=["About 95 species are currently accepted ."]
    >>> predictions=["About 95 you now get in ."]
    >>> references=[["About 95 species are currently known ."]]
    >>> wiki_split = datasets.load_metric("wiki_split")
    >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)
    >>> print(results)
    {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}
"""


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
        return re.sub(regex, " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    """1 if the two answers are equal after normalization, else 0."""
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    """Percentage of predictions matching at least one of their references."""
    scores = [
        any(compute_exact(ref, pred) for ref in refs)
        for pred, refs in zip(predictions, references)
    ]
    return (sum(scores) / len(scores)) * 100


def SARIngram(sgrams, cgrams, rgramslist, numref):
    """Compute SARI keep / delete / add scores for one n-gram order.

    sgrams/cgrams are the source/candidate n-gram lists, rgramslist is one
    n-gram list per reference, numref the number of references.
    """
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1

    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)


def SARIsent(ssent, csent, rsents):
    """SARI score of candidate `csent` against source `ssent` and references.

    Averages the keep/delete/add F-scores over 1- to 4-grams.
    """
    numref = len(rsents)

    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2grams.append(r1grams[i] + " " + r1grams[i + 1])
            if i < len(r1grams) - 2:
                r3grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2])
            if i < len(r1grams) - 3:
                r4grams.append(r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3])
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2grams.append(s1grams[i] + " " + s1grams[i + 1])
        if i < len(s1grams) - 2:
            s3grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2])
        if i < len(s1grams) - 3:
            s4grams.append(s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3])

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2grams.append(c1grams[i] + " " + c1grams[i + 1])
        if i < len(c1grams) - 2:
            c3grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2])
        if i < len(c1grams) - 3:
            c4grams.append(c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3])

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    """Tokenize (and optionally lowercase) a sentence for SARI.

    Normalization is requried for the ASSET dataset (one of the primary
    datasets in sentence simplification) to allow using space to split the
    sentence. Even though Wiki-Auto and TURK datasets do not require
    normalization, we do it for consistency.
    Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu 2.x moved the tokenizer registry.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


def compute_sari(sources, predictions, references):
    """Corpus-level SARI (0-100), averaged over sentence-level SARIsent."""
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("Sources length must match predictions and references lengths.")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


def compute_sacrebleu(
    predictions,
    references,
    smooth_method="exp",
    smooth_value=None,
    force=False,
    lowercase=False,
    use_effective_order=False,
):
    """Corpus BLEU via sacrebleu; references are transposed to its layout."""
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("Sacrebleu requires the same number of references for each prediction")
    transformed_references = [
        [refs[i] for refs in references] for i in range(references_per_prediction)
    ]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WikiSplit(datasets.Metric):
    """Combined SARI + sacreBLEU + exact-match metric for text simplification."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=[
                "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
                "https://github.com/cocoxu/simplification/blob/master/SARI.py",
                "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
                "https://github.com/mjpost/sacreBLEU",
            ],
            reference_urls=[
                "https://www.aclweb.org/anthology/Q16-1029.pdf",
                "https://github.com/mjpost/sacreBLEU",
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({"sari": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"sacrebleu": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"exact": compute_em(predictions=predictions, references=references)})
        return result
52
0
"""simple docstring""" # Function to print upper half of diamond (pyramid) def __UpperCAmelCase ( lowercase ): """simple docstring""" for i in range(0 ,_lowerCAmelCase ): for _ in range(0 ,n - i - 1 ): # printing spaces print(""" """ ,end="""""" ) for _ in range(0 ,i + 1 ): # printing stars print("""* """ ,end="""""" ) print() def __UpperCAmelCase ( lowercase ): """simple docstring""" for i in range(_lowerCAmelCase ,0 ,-1 ): for _ in range(_lowerCAmelCase ,0 ,-1 ): # printing stars print("""* """ ,end="""""" ) print() for _ in range(n - i + 1 ,0 ,-1 ): # printing spaces print(""" """ ,end="""""" ) def __UpperCAmelCase ( lowercase ): """simple docstring""" if n <= 0: print(""" ... .... nothing printing :(""" ) return floyd(_lowerCAmelCase ) # upper half reverse_floyd(_lowerCAmelCase ) # lower half if __name__ == "__main__": print(r"""| /\ | |- | |- |--| |\ /| |-""") print(r"""|/ \| |- |_ |_ |__| | \/ | |_""") UpperCAmelCase__ = 1 while K: UpperCAmelCase__ = int(input("""enter the number and , and see the magic : """)) print() pretty_print(user_number) UpperCAmelCase__ = int(input("""press 0 to exit... and 1 to continue...""")) print("""Good Bye...""")
289
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class RobertaConfig(PretrainedConfig):
    """Configuration class for a RoBERTa model.

    Stores the hyper-parameters used to instantiate the model; all arguments
    are optional and default to the `roberta-base` architecture.
    """

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration: declares the model's dynamic input axes."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch
        # and sequence; all other tasks are (batch, sequence).
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
52
0
'''simple docstring''' from cva import destroyAllWindows, imread, imshow, waitKey def lowercase__( __UpperCamelCase: str ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = img.shape[0], img.shape[1] # converting each pixel's color to its negative for i in range(_lowerCAmelCase ): for j in range(_lowerCAmelCase ): SCREAMING_SNAKE_CASE : Tuple = [2_55, 2_55, 2_55] - img[i][j] return img if __name__ == "__main__": # read original image UpperCamelCase_ = imread("image_data/lena.jpg", 1) # convert to its negative UpperCamelCase_ = convert_to_negative(img) # show result image imshow("negative of original image", img) waitKey(0) destroyAllWindows()
251
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    """Unconditional audio generation pipeline (Dance Diffusion).

    Wraps a 1D UNet and a scheduler; `__call__` runs the full denoising loop
    and returns the generated audio.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator=None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ):
        """Generate `batch_size` audio clips.

        Raises ValueError if the requested length is too short for the UNet's
        downsampling stack, or if a generator list mismatches the batch size.
        Returns AudioPipelineOutput (or a plain tuple when return_dict=False).
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # Each up-block halves/doubles resolution; the sample must survive
        # the full downsampling stack with at least a few samples left.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to the next multiple the UNet can handle; the extra
            # samples are trimmed off again after denoising.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
52
0
'''simple docstring''' import numpy # List of input, output pairs a : Any = ( ((5, 2, 3), 1_5), ((6, 5, 9), 2_5), ((1_1, 1_2, 1_3), 4_1), ((1, 1, 1), 8), ((1_1, 1_2, 1_3), 4_1), ) a : Optional[Any] = (((5_1_5, 2_2, 1_3), 5_5_5), ((6_1, 3_5, 4_9), 1_5_0)) a : Tuple = [2, 4, 1, 5] a : List[str] = len(train_data) a : Union[str, Any] = 0.0_0_9 def __lowerCamelCase ( _lowercase , _lowercase="train" ) -> Tuple: return calculate_hypothesis_value(_lowerCAmelCase , _lowerCAmelCase ) - output( _lowerCAmelCase , _lowerCAmelCase ) def __lowerCamelCase ( _lowercase ) -> str: UpperCAmelCase : Dict = 0 for i in range(len(_lowerCAmelCase ) - 1 ): hyp_val += data_input_tuple[i] * parameter_vector[i + 1] hyp_val += parameter_vector[0] return hyp_val def __lowerCamelCase ( _lowercase , _lowercase ) -> Any: if data_set == "train": return train_data[example_no][1] elif data_set == "test": return test_data[example_no][1] return None def __lowerCamelCase ( _lowercase , _lowercase ) -> Tuple: if data_set == "train": return _hypothesis_value(train_data[example_no][0] ) elif data_set == "test": return _hypothesis_value(test_data[example_no][0] ) return None def __lowerCamelCase ( _lowercase , _lowercase=m ) -> Any: UpperCAmelCase : int = 0 for i in range(_lowerCAmelCase ): if index == -1: summation_value += _error(_lowerCAmelCase ) else: summation_value += _error(_lowerCAmelCase ) * train_data[i][0][index] return summation_value def __lowerCamelCase ( _lowercase ) -> Any: UpperCAmelCase : str = summation_of_cost_derivative(_lowerCAmelCase , _lowerCAmelCase ) / m return cost_derivative_value def __lowerCamelCase ( ) -> Union[str, Any]: global parameter_vector # Tune these values to set a tolerance value for predicted output UpperCAmelCase : int = 0.00_0002 UpperCAmelCase : Optional[int] = 0 UpperCAmelCase : List[str] = 0 while True: j += 1 UpperCAmelCase : List[Any] = [0, 0, 0, 0] for i in range(0 , len(_lowerCAmelCase ) ): UpperCAmelCase : List[Any] = get_cost_derivative(i - 1 ) 
UpperCAmelCase : Union[str, Any] = ( parameter_vector[i] - LEARNING_RATE * cost_derivative ) if numpy.allclose( _lowerCAmelCase , _lowerCAmelCase , atol=_lowerCAmelCase , rtol=_lowerCAmelCase , ): break UpperCAmelCase : Union[str, Any] = temp_parameter_vector print(("""Number of iterations:""", j) ) def __lowerCamelCase ( ) -> Tuple: for i in range(len(_lowerCAmelCase ) ): print(("""Actual output value:""", output(_lowerCAmelCase , """test""" )) ) print(("""Hypothesis output:""", calculate_hypothesis_value(_lowerCAmelCase , """test""" )) ) if __name__ == "__main__": run_gradient_descent() print("""\nTesting gradient descent for a linear hypothesis function.\n""") test_gradient_descent()
265
import functools def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> int: UpperCamelCase : Optional[int] = len(_lowerCAmelCase ) UpperCamelCase : List[str] = len(_lowerCAmelCase ) @functools.cache def min_distance(_lowerCAmelCase , _lowerCAmelCase ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCamelCase : Union[str, Any] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , _lowerCAmelCase ) , 1 + min_distance(_lowerCAmelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
52
0
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase = logging.get_logger() @dataclass class UpperCAmelCase_ : snake_case__ = 42 snake_case__ = field(default_factory=__snake_case) snake_case__ = field(default_factory=__snake_case) def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] ) -> str: _UpperCamelCase = len(list(m.modules() ) ) == 1 or isinstance(A_ , nn.Convad ) or isinstance(A_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(A_ ) def __call__( self : int , __UpperCamelCase : Optional[int] ) -> List[str]: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(A_ ) [x.remove() for x in self.handles] return self @property def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]: return list(filter(lambda __UpperCamelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class UpperCAmelCase_ : snake_case__ = 42 snake_case__ = 42 snake_case__ = 1 snake_case__ = field(default_factory=__snake_case) snake_case__ = field(default_factory=__snake_case) snake_case__ = True def __call__( self : Optional[Any] , __UpperCamelCase : Dict ) -> str: _UpperCamelCase = Tracker(self.dest )(A_ ).parametrized _UpperCamelCase = Tracker(self.src )(A_ ).parametrized _UpperCamelCase = 
list(filter(lambda __UpperCamelCase : type(A_ ) not in self.src_skip , A_ ) ) _UpperCamelCase = list(filter(lambda __UpperCamelCase : type(A_ ) not in self.dest_skip , A_ ) ) if len(A_ ) != len(A_ ) and self.raise_if_mismatch: raise Exception( F'''Numbers of operations are different. Source module has {len(A_ )} operations while''' F''' destination module has {len(A_ )}.''' ) for dest_m, src_m in zip(A_ , A_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) class UpperCAmelCase_ ( nn.Module): def __init__( self : Union[str, Any] , __UpperCamelCase : Any ) -> str: super().__init__() _UpperCamelCase = [] # - get the stem feature_blocks.append(('''conv1''', model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith('''block''' ), F'''Unexpected layer name {k}''' _UpperCamelCase = len(A_ ) + 1 feature_blocks.append((F'''res{block_index}''', v) ) _UpperCamelCase = nn.ModuleDict(A_ ) def _UpperCamelCase ( self : List[str] , __UpperCamelCase : Union[str, Any] ) -> List[str]: return get_trunk_forward_outputs( A_ , out_feat_keys=A_ , feature_blocks=self._feature_blocks , ) class UpperCAmelCase_ ( __snake_case): def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str ) -> Dict: _UpperCamelCase = x.split('''-''' ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : List[Any] , __UpperCamelCase : Union[str, Any] ) -> Optional[Any]: if x not in self: _UpperCamelCase = self.convert_name_to_timm(A_ ) _UpperCamelCase = partial(lambda: (timm.create_model(A_ , pretrained=A_ ).eval(), None) ) else: _UpperCamelCase = super().__getitem__(A_ ) return val class UpperCAmelCase_ ( __snake_case): def __getitem__( self : List[str] , __UpperCamelCase : Dict ) -> Dict: if "seer" in x and "in1k" not in x: _UpperCamelCase = RegNetModel else: _UpperCamelCase = RegNetForImageClassification return val def lowercase ( a__ : Tuple , 
a__ : Any , a__ : List[Any] ) -> List[str]: for from_key, to_key in keys: _UpperCamelCase = from_state_dict[from_key].clone() print(F'''Copied key={from_key} to={to_key}''' ) return to_state_dict def lowercase ( a__ : Dict , a__ : Optional[Any] , a__ : List[str] , a__ : int , a__ : str , a__ : str = True , ) -> int: print(F'''Converting {name}...''' ) with torch.no_grad(): _UpperCamelCase = from_model_func() _UpperCamelCase = our_model_func(_lowerCAmelCase ).eval() _UpperCamelCase = ModuleTransfer(src=_lowerCAmelCase , dest=_lowerCAmelCase , raise_if_mismatch=_lowerCAmelCase ) _UpperCamelCase = torch.randn((1, 3, 224, 224) ) module_transfer(_lowerCAmelCase ) if from_state_dict is not None: _UpperCamelCase = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: _UpperCamelCase = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")] _UpperCamelCase = manually_copy_vissl_head(_lowerCAmelCase , our_model.state_dict() , _lowerCAmelCase ) our_model.load_state_dict(_lowerCAmelCase ) _UpperCamelCase = our_model(_lowerCAmelCase , output_hidden_states=_lowerCAmelCase ) _UpperCamelCase = ( our_outputs.logits if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else our_outputs.last_hidden_state ) _UpperCamelCase = from_model(_lowerCAmelCase ) _UpperCamelCase = from_output[-1] if type(_lowerCAmelCase ) is list else from_output # now since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: _UpperCamelCase = our_outputs.hidden_states[-1] assert torch.allclose(_lowerCAmelCase , _lowerCAmelCase ), "The model logits don't match the original one." 
if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add model''' , use_temp_dir=_lowerCAmelCase , ) _UpperCamelCase = 224 if "seer" not in name else 384 # we can use the convnext one _UpperCamelCase = AutoImageProcessor.from_pretrained('''facebook/convnext-base-224-22k-1k''' , size=_lowerCAmelCase ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message='''Add image processor''' , use_temp_dir=_lowerCAmelCase , ) print(F'''Pushed {name}''' ) def lowercase ( a__ : Union[str, Any] , a__ : Optional[Any] = None , a__ : int = True ) -> List[str]: _UpperCamelCase = "imagenet-1k-id2label.json" _UpperCamelCase = 1000 _UpperCamelCase = (1, num_labels) _UpperCamelCase = "huggingface/label-files" _UpperCamelCase = num_labels _UpperCamelCase = json.load(open(cached_download(hf_hub_url(_lowerCAmelCase , _lowerCAmelCase , repo_type='''dataset''' ) ) , '''r''' ) ) _UpperCamelCase = {int(_lowerCAmelCase ): v for k, v in idalabel.items()} _UpperCamelCase = idalabel _UpperCamelCase = {v: k for k, v in idalabel.items()} _UpperCamelCase = partial(_lowerCAmelCase , num_labels=_lowerCAmelCase , idalabel=_lowerCAmelCase , labelaid=_lowerCAmelCase ) _UpperCamelCase = { "regnet-x-002": ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ), "regnet-x-004": ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ), "regnet-x-006": ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ), "regnet-x-008": ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ), "regnet-x-016": ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ), "regnet-x-032": ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , 
hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type='''x''' ), "regnet-x-040": ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type='''x''' ), "regnet-x-064": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type='''x''' ), "regnet-x-080": ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type='''x''' ), "regnet-x-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type='''x''' ), "regnet-x-160": ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type='''x''' ), "regnet-x-320": ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type='''x''' ), # y variant "regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), "regnet-y-004": ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), "regnet-y-006": ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), "regnet-y-008": ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), "regnet-y-016": ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), "regnet-y-032": ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ), "regnet-y-040": ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ), "regnet-y-064": ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ), "regnet-y-080": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , 
groups_width=56 ), "regnet-y-120": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ), "regnet-y-160": ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ), "regnet-y-320": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 "regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), "regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), "regnet-y-1280-seer": RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), "regnet-y-2560-seer": RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), "regnet-y-10b-seer": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), # finetuned on imagenet "regnet-y-320-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ), "regnet-y-640-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ), "regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ), "regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ), "regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ), } _UpperCamelCase = NameToOurModelFuncMap() _UpperCamelCase = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(a__ : List[Any] , a__ : Dict ) -> Tuple[nn.Module, Dict]: _UpperCamelCase = 
torch.hub.load_state_dict_from_url(_lowerCAmelCase , model_dir=str(_lowerCAmelCase ) , map_location='''cpu''' ) _UpperCamelCase = model_func() # check if we have a head, if yes add it _UpperCamelCase = files["classy_state_dict"]["base_model"]["model"] _UpperCamelCase = model_state_dict["trunk"] model.load_state_dict(_lowerCAmelCase ) return model.eval(), model_state_dict["heads"] # pretrained _UpperCamelCase = partial( _lowerCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) _UpperCamelCase = partial( _lowerCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) _UpperCamelCase = partial( _lowerCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) _UpperCamelCase = partial( _lowerCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned _UpperCamelCase = partial( _lowerCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) _UpperCamelCase = partial( _lowerCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) _UpperCamelCase = partial( _lowerCAmelCase , 
'''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch''' , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) _UpperCamelCase = partial( _lowerCAmelCase , '''https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch''' , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1010 , w_a=1744 , w_a=620.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( _lowerCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _lowerCAmelCase , _lowerCAmelCase , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( _lowerCAmelCase , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , ) return config, expected_shape if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported regnet* architecture,""" """ currently: regnetx-*, regnety-*. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) UpperCAmelCase = parser.parse_args() UpperCAmelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
256
import itertools import random import unittest import numpy as np from transformers import ASTFeatureExtractor from transformers.testing_utils import require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin __lowerCamelCase : str = random.Random() if is_torch_available(): import torch def A_ ( _lowerCAmelCase , _lowerCAmelCase=1.0 , _lowerCAmelCase=None , _lowerCAmelCase=None ) -> Optional[Any]: if rng is None: UpperCamelCase : Optional[int] = global_rng UpperCamelCase : Optional[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class A__ ( unittest.TestCase ): def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=1 , A_=0.0 , A_=1_6000 , A_=True , A_=True , ): '''simple docstring''' UpperCamelCase : Tuple = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : List[Any] = min_seq_length UpperCamelCase : List[str] = max_seq_length UpperCamelCase : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCamelCase : Union[str, Any] = feature_size UpperCamelCase : List[str] = padding_value UpperCamelCase : Optional[Any] = sampling_rate UpperCamelCase : List[str] = return_attention_mask UpperCamelCase : List[Any] = do_normalize def __UpperCamelCase( self ): '''simple docstring''' return { "feature_size": self.feature_size, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __UpperCamelCase( self , A_=False , A_=False ): '''simple docstring''' def _flatten(A_ ): return list(itertools.chain(*A_ ) ) if equal_length: UpperCamelCase : List[str] = floats_list((self.batch_size, self.max_seq_length) ) else: # make sure that inputs increase in size UpperCamelCase : Dict = [ _flatten(floats_list((x, 
self.feature_size) ) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCamelCase : Union[str, Any] = [np.asarray(A_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :Optional[Any] = ASTFeatureExtractor def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = ASTFeatureExtractionTester(self ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 UpperCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )] UpperCamelCase : Dict = [np.asarray(A_ ) for speech_input in speech_inputs] # Test not batched input UpperCamelCase : Dict = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values UpperCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test batched UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values UpperCamelCase : Any = feat_extract(A_ , padding=A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) # Test 2-D numpy arrays are batched. 
UpperCamelCase : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)] UpperCamelCase : int = np.asarray(A_ ) UpperCamelCase : Any = feat_extract(A_ , return_tensors="np" ).input_values UpperCamelCase : List[str] = feat_extract(A_ , return_tensors="np" ).input_values for enc_seq_a, enc_seq_a in zip(A_ , A_ ): self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) ) @require_torch def __UpperCamelCase( self ): '''simple docstring''' import torch UpperCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) UpperCamelCase : int = np.random.rand(100 ).astype(np.floataa ) UpperCamelCase : str = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: UpperCamelCase : List[Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" ) self.assertTrue(np_processed.input_values.dtype == np.floataa ) UpperCamelCase : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" ) self.assertTrue(pt_processed.input_values.dtype == torch.floataa ) def __UpperCamelCase( self , A_ ): '''simple docstring''' from datasets import load_dataset UpperCamelCase : Dict = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCamelCase : Any = ds.sort("id" ).select(range(A_ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] @require_torch def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = torch.tensor( [-0.98_94, -1.27_76, -0.90_66, -1.27_76, -0.93_49, -1.26_09, -1.03_86, -1.27_76, -1.15_61, -1.27_76, -1.20_52, -1.27_23, -1.21_90, -1.21_32, -1.27_76, -1.11_33, -1.19_53, -1.13_43, -1.15_84, -1.22_03, -1.17_70, -1.24_74, -1.23_81, -1.19_36, -0.92_70, -0.83_17, -0.80_49, -0.77_06, -0.75_65, -0.78_69] ) # fmt: on UpperCamelCase : List[Any] = self._load_datasamples(1 ) UpperCamelCase : Tuple = ASTFeatureExtractor() UpperCamelCase : str = feature_extractor(A_ , 
return_tensors="pt" ).input_values self.assertEquals(input_values.shape , (1, 1024, 128) ) self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
52
0
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss _snake_case : Union[str, Any] = pytest.mark.integration @require_faiss class _UpperCAmelCase ( __snake_case ): def lowerCamelCase ( self :Union[str, Any] ): A = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} ) return dset def lowerCamelCase ( self :Optional[Any] ): import faiss A = self._create_dummy_dataset() A = dset.map( lambda __UpperCamelCase , __UpperCamelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ ) A = dset.add_faiss_index("vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT ) A = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def lowerCamelCase ( self :Tuple ): import faiss A = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , ) A = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def lowerCamelCase ( self :List[str] ): import faiss A = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) A = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def lowerCamelCase ( self :Any ): A = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def lowerCamelCase ( self :str ): from elasticsearch import Elasticsearch A = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: A = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) A = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} A = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=A_ ) A = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class _UpperCAmelCase ( __snake_case ): def lowerCamelCase ( self :Optional[int] ): import faiss A = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query A = np.zeros(5 , dtype=np.floataa ) A = 1 A = index.search(A_ ) self.assertRaises(A_ , index.search , query.reshape(-1 
, 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries A = np.eye(5 , dtype=np.floataa )[::-1] A = index.search_batch(A_ ) self.assertRaises(A_ , index.search_batch , queries[0] ) A = [scores[0] for scores in total_scores] A = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , A_ ) def lowerCamelCase ( self :List[Any] ): import faiss A = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) A = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(A_ ): A = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def lowerCamelCase ( self :List[str] ): import faiss A = faiss.IndexFlat(5 ) A = FaissIndex(custom_index=A_ ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def lowerCamelCase ( self :List[Any] ): import faiss A = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. 
# see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: index.save(tmp_file.name ) A = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) A = np.zeros(5 , dtype=np.floataa ) A = 1 A = index.search(A_ ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def A__ ( UpperCamelCase ): import faiss A = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) A = "index.faiss" A = F"mock://{index_name}" index.save(_lowerCAmelCase , storage_options=mockfs.storage_options ) A = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options ) A = np.zeros(5 , dtype=np.floataa ) A = 1 A = index.search(_lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class _UpperCAmelCase ( __snake_case ): def lowerCamelCase ( self :Union[str, Any] ): from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: A = Elasticsearch() A = {"acknowledged": True} A = ElasticSearchIndex(es_client=A_ ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query A = "foo" A = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} A = index.search(A_ ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout A = "foo" A = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} A = index.search(A_ , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries A = ["foo", "bar", "foobar"] A = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} A = index.search_batch(A_ ) A = [scores[0] for scores in total_scores] A = [indices[0] for indices 
in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ ) # batched queries with timeout A = ["foo", "bar", "foobar"] A = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} A = index.search_batch(A_ , request_timeout=30 ) A = [scores[0] for scores in total_scores] A = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ )
292
import pickle import numpy as np from matplotlib import pyplot as plt class A__ : def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ): '''simple docstring''' UpperCamelCase : int = bp_numa UpperCamelCase : int = bp_numa UpperCamelCase : List[Any] = bp_numa UpperCamelCase : Optional[int] = conva_get[:2] UpperCamelCase : Optional[Any] = conva_get[2] UpperCamelCase : Dict = size_pa UpperCamelCase : Union[str, Any] = rate_w UpperCamelCase : Dict = rate_t UpperCamelCase : Union[str, Any] = [ np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] UpperCamelCase : Any = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 ) UpperCamelCase : Optional[Any] = -2 * np.random.rand(self.conva[1] ) + 1 UpperCamelCase : Any = -2 * np.random.rand(self.num_bpa ) + 1 UpperCamelCase : int = -2 * np.random.rand(self.num_bpa ) + 1 def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[int] = { "num_bp1": self.num_bpa, "num_bp2": self.num_bpa, "num_bp3": self.num_bpa, "conv1": self.conva, "step_conv1": self.step_conva, "size_pooling1": self.size_poolinga, "rate_weight": self.rate_weight, "rate_thre": self.rate_thre, "w_conv1": self.w_conva, "wkj": self.wkj, "vji": self.vji, "thre_conv1": self.thre_conva, "thre_bp2": self.thre_bpa, "thre_bp3": self.thre_bpa, } with open(A_ , "wb" ) as f: pickle.dump(A_ , A_ ) print(F"""Model saved: {save_path}""" ) @classmethod def __UpperCamelCase( cls , A_ ): '''simple docstring''' with open(A_ , "rb" ) as f: UpperCamelCase : Optional[Any] = pickle.load(A_ ) # noqa: S301 UpperCamelCase : List[Any] = model_dic.get("conv1" ) conv_get.append(model_dic.get("step_conv1" ) ) UpperCamelCase : Union[str, Any] = model_dic.get("size_pooling1" ) UpperCamelCase : List[Any] = model_dic.get("num_bp1" ) UpperCamelCase : Dict = model_dic.get("num_bp2" ) UpperCamelCase : Dict = 
model_dic.get("num_bp3" ) UpperCamelCase : Dict = model_dic.get("rate_weight" ) UpperCamelCase : str = model_dic.get("rate_thre" ) # create model instance UpperCamelCase : Any = CNN(A_ , A_ , A_ , A_ , A_ , A_ , A_ ) # modify model parameter UpperCamelCase : str = model_dic.get("w_conv1" ) UpperCamelCase : Optional[Any] = model_dic.get("wkj" ) UpperCamelCase : int = model_dic.get("vji" ) UpperCamelCase : Any = model_dic.get("thre_conv1" ) UpperCamelCase : Optional[int] = model_dic.get("thre_bp2" ) UpperCamelCase : Union[str, Any] = model_dic.get("thre_bp3" ) return conv_ins def __UpperCamelCase( self , A_ ): '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def __UpperCamelCase( self , A_ ): '''simple docstring''' return round(A_ , 3 ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : str = convs[0] UpperCamelCase : Optional[Any] = convs[1] UpperCamelCase : Optional[Any] = np.shape(A_ )[0] # get the data slice of original image data, data_focus UpperCamelCase : List[str] = [] for i_focus in range(0 , size_data - size_conv + 1 , A_ ): for j_focus in range(0 , size_data - size_conv + 1 , A_ ): UpperCamelCase : Union[str, Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(A_ ) # calculate the feature map of every single kernel, and saved as list of matrix UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(A_ ): UpperCamelCase : str = [] for i_focus in range(len(A_ ) ): UpperCamelCase : List[Any] = ( np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(A_ ) ) UpperCamelCase : Optional[int] = np.asmatrix(A_ ).reshape( A_ , A_ ) data_featuremap.append(A_ ) # expanding the data slice to One dimenssion UpperCamelCase : List[Any] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(A_ ) ) UpperCamelCase : Tuple = np.asarray(A_ ) return 
focus_list, data_featuremap def __UpperCamelCase( self , A_ , A_ , A_="average_pool" ): '''simple docstring''' UpperCamelCase : Any = len(featuremaps[0] ) UpperCamelCase : str = int(size_map / size_pooling ) UpperCamelCase : Optional[int] = [] for i_map in range(len(A_ ) ): UpperCamelCase : Tuple = featuremaps[i_map] UpperCamelCase : Any = [] for i_focus in range(0 , A_ , A_ ): for j_focus in range(0 , A_ , A_ ): UpperCamelCase : int = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(A_ ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(A_ ) ) UpperCamelCase : Optional[Any] = np.asmatrix(A_ ).reshape(A_ , A_ ) featuremap_pooled.append(A_ ) return featuremap_pooled def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = [] for i in range(len(A_ ) ): UpperCamelCase : List[Any] = np.shape(data[i] ) UpperCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] ) UpperCamelCase : Optional[int] = data_listed.getA().tolist()[0] data_expanded.extend(A_ ) UpperCamelCase : Any = np.asarray(A_ ) return data_expanded def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : List[Any] = np.asarray(A_ ) UpperCamelCase : List[Any] = np.shape(A_ ) UpperCamelCase : Any = data_mat.reshape(1 , shapes[0] * shapes[1] ) return data_expanded def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : int = [] UpperCamelCase : Optional[int] = 0 for i_map in range(A_ ): UpperCamelCase : int = np.ones((size_map, size_map) ) for i in range(0 , A_ , A_ ): for j in range(0 , A_ , A_ ): UpperCamelCase : str = pd_pool[ i_pool ] UpperCamelCase : str = i_pool + 1 UpperCamelCase : str = np.multiply( A_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) ) pd_all.append(A_ ) return pd_all def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_=bool ): '''simple 
docstring''' print("----------------------Start Training-------------------------" ) print((" - - Shape: Train_Data ", np.shape(A_ )) ) print((" - - Shape: Teach_Data ", np.shape(A_ )) ) UpperCamelCase : List[str] = 0 UpperCamelCase : Union[str, Any] = [] UpperCamelCase : int = 1_0000 while rp < n_repeat and mse >= error_accuracy: UpperCamelCase : Tuple = 0 print(F"""-------------Learning Time {rp}--------------""" ) for p in range(len(A_ ) ): # print('------------Learning Image: %d--------------'%p) UpperCamelCase : Any = np.asmatrix(datas_train[p] ) UpperCamelCase : List[str] = np.asarray(datas_teach[p] ) UpperCamelCase , UpperCamelCase : Dict = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : Tuple = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : int = np.shape(A_ ) UpperCamelCase : List[str] = self._expand(A_ ) UpperCamelCase : Optional[int] = data_bp_input UpperCamelCase : str = np.dot(A_ , self.vji.T ) - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) UpperCamelCase : List[Any] = np.dot(A_ , self.wkj.T ) - self.thre_bpa UpperCamelCase : Dict = self.sig(A_ ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- UpperCamelCase : List[Any] = np.multiply( (data_teach - bp_outa) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : str = np.multiply( np.dot(A_ , self.wkj ) , np.multiply(A_ , (1 - bp_outa) ) ) UpperCamelCase : Any = np.dot(A_ , self.vji ) UpperCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga) UpperCamelCase : List[Any] = pd_conva_pooled.T.getA().tolist() UpperCamelCase : List[Any] = self._calculate_gradient_from_pool( A_ , A_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , ) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): UpperCamelCase : List[Any] = self._expand_mat(pd_conva_all[k_conv] ) UpperCamelCase : List[Any] = 
self.rate_weight * np.dot(A_ , A_ ) UpperCamelCase : str = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) UpperCamelCase : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer UpperCamelCase : Optional[Any] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight UpperCamelCase : List[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight UpperCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre UpperCamelCase : List[str] = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image UpperCamelCase : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) UpperCamelCase : Any = rp + 1 UpperCamelCase : Union[str, Any] = error_count / patterns all_mse.append(A_ ) def draw_error(): UpperCamelCase : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(A_ , "+-" ) plt.plot(A_ , "r--" ) plt.xlabel("Learning Times" ) plt.ylabel("All_mse" ) plt.grid(A_ , alpha=0.5 ) plt.show() print("------------------Training Complished---------------------" ) print((" - - Training epoch: ", rp, F""" - - Mse: {mse:.6f}""") ) if draw_e: draw_error() return mse def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = [] print("-------------------Start Testing-------------------------" ) print((" - - Shape: Test_Data ", np.shape(A_ )) ) for p in range(len(A_ ) ): UpperCamelCase : int = np.asmatrix(datas_test[p] ) UpperCamelCase , UpperCamelCase : Any = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : List[str] = self.pooling(A_ , self.size_poolinga ) UpperCamelCase : Dict = self._expand(A_ ) UpperCamelCase : List[Any] = data_bp_input UpperCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa UpperCamelCase : List[Any] = self.sig(A_ ) UpperCamelCase : int = bp_outa * 
self.wkj.T - self.thre_bpa UpperCamelCase : Optional[int] = self.sig(A_ ) produce_out.extend(bp_outa.getA().tolist() ) UpperCamelCase : List[str] = [list(map(self.do_round , A_ ) ) for each in produce_out] return np.asarray(A_ ) def __UpperCamelCase( self , A_ ): '''simple docstring''' UpperCamelCase : Union[str, Any] = np.asmatrix(A_ ) UpperCamelCase , UpperCamelCase : List[Any] = self.convolute( A_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , ) UpperCamelCase : str = self.pooling(A_ , self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
52
0
'''simple docstring''' import argparse from pathlib import Path import torch from packaging import version from torch.onnx import export from diffusers import AutoencoderKL __a = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11') def __UpperCAmelCase ( a_: str, a_: str, a_: Any, a_: Dict, a_: Optional[Any], a_: List[str], a_: Optional[int], a_: List[str]=False, ): output_path.parent.mkdir(parents=_lowerCAmelCase, exist_ok=_lowerCAmelCase ) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( _lowerCAmelCase, _lowerCAmelCase, f=output_path.as_posix(), input_names=_lowerCAmelCase, output_names=_lowerCAmelCase, dynamic_axes=_lowerCAmelCase, do_constant_folding=_lowerCAmelCase, use_external_data_format=_lowerCAmelCase, enable_onnx_checker=_lowerCAmelCase, opset_version=_lowerCAmelCase, ) else: export( _lowerCAmelCase, _lowerCAmelCase, f=output_path.as_posix(), input_names=_lowerCAmelCase, output_names=_lowerCAmelCase, dynamic_axes=_lowerCAmelCase, do_constant_folding=_lowerCAmelCase, opset_version=_lowerCAmelCase, ) @torch.no_grad() def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int], a_: int, a_: str = False ): _UpperCAmelCase : Optional[Any] = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): _UpperCAmelCase : str = "cuda" elif fpaa and not torch.cuda.is_available(): raise ValueError("`float16` model export is only supported on GPUs with CUDA" ) else: _UpperCAmelCase : Optional[Any] = "cpu" _UpperCAmelCase : Dict = Path(_lowerCAmelCase ) # VAE DECODER _UpperCAmelCase : Dict = AutoencoderKL.from_pretrained(model_path + "/vae" ) _UpperCAmelCase : Optional[Any] = vae_decoder.config.latent_channels # forward only through the decoder part _UpperCAmelCase : Optional[int] = vae_decoder.decode onnx_export( _lowerCAmelCase, model_args=( torch.randn(1, 
_lowerCAmelCase, 25, 25 ).to(device=_lowerCAmelCase, dtype=_lowerCAmelCase ), False, ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={ "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"}, }, opset=_lowerCAmelCase, ) del vae_decoder if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--model_path', type=str, required=True, help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).', ) parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.') parser.add_argument( '--opset', default=14, type=int, help='The version of the ONNX operator set to use.', ) parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode') __a = parser.parse_args() print(args.output_path) convert_models(args.model_path, args.output_path, args.opset, args.fpaa) print('SD: Done: ONNX')
145
# NOTE(review): this file was minified — class/attribute/parameter names were
# replaced by placeholders (`A__`, `A_`, `UpperCamelCase`, ...). Several defs
# repeat the parameter name `A_`, which will not compile; the names must be
# restored from the upstream BART configuration module. Comments below describe
# the intended behavior; the code itself is left byte-identical.
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging

# Module logger and the canonical checkpoint -> config-url map.
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

__lowerCamelCase : Any = {
    """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class A__ ( __snake_case ):
    """BART model configuration (presumably `BartConfig` — mangled).

    Stores encoder/decoder dimensions, dropout rates and special-token ids;
    `attribute_map` aliases the generic `num_attention_heads`/`hidden_size`
    names onto BART-specific fields.
    """

    _UpperCAmelCase :Dict = 'bart'
    _UpperCAmelCase :str = ['past_key_values']
    _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ):
        """Initialize the config; defaults correspond to facebook/bart-large."""
        # Copy constructor arguments onto the instance (RHS names are the
        # original parameter names that were lost in minification).
        UpperCamelCase : int = vocab_size
        UpperCamelCase : List[Any] = max_position_embeddings
        UpperCamelCase : Any = d_model
        UpperCamelCase : Optional[Any] = encoder_ffn_dim
        UpperCamelCase : List[Any] = encoder_layers
        UpperCamelCase : int = encoder_attention_heads
        UpperCamelCase : Optional[int] = decoder_ffn_dim
        UpperCamelCase : List[str] = decoder_layers
        UpperCamelCase : Optional[int] = decoder_attention_heads
        UpperCamelCase : int = dropout
        UpperCamelCase : int = attention_dropout
        UpperCamelCase : Tuple = activation_dropout
        UpperCamelCase : Tuple = activation_function
        UpperCamelCase : int = init_std
        UpperCamelCase : List[Any] = encoder_layerdrop
        UpperCamelCase : List[str] = decoder_layerdrop
        UpperCamelCase : Dict = classifier_dropout
        UpperCamelCase : Optional[int] = use_cache
        UpperCamelCase : List[Any] = encoder_layers
        UpperCamelCase : int = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=A_ ,
            pad_token_id=A_ ,
            bos_token_id=A_ ,
            eos_token_id=A_ ,
            is_encoder_decoder=A_ ,
            decoder_start_token_id=A_ ,
            forced_eos_token_id=A_ ,
            **A_ ,
        )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ):
            UpperCamelCase : int = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                "The config can simply be saved and uploaded again to be fixed."
            )


class A__ ( __snake_case ):
    """ONNX export config for BART (presumably `BartOnnxConfig` — mangled).

    Declares input/output axis names and builds dummy inputs for the
    seq2seq, causal-lm and sequence-classification export tasks.
    """

    @property
    def __UpperCamelCase( self ):
        """Input axis spec keyed by task; adds past_key_values axes if enabled."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[int] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With cache, the decoder consumes a single new token per step.
                UpperCamelCase : List[str] = {0: "batch"}
                UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"}
                UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(A_ , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCamelCase : Any = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers
                for i in range(A_ ):
                    UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
                    UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            UpperCamelCase : Optional[Any] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def __UpperCamelCase( self ):
        """Output axis spec; mirrors `inputs` and extends past axes per layer."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Tuple = super().outputs
        else:
            UpperCamelCase : Dict = super(A_ , self ).outputs
            if self.use_past:
                UpperCamelCase , UpperCamelCase : int = self.num_layers
                for i in range(A_ ):
                    UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"}
                    UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        """Build dummy encoder+decoder inputs (default / seq2seq-lm tasks),
        optionally with zero-filled past_key_values tensors."""
        UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_
        )
        # Generate decoder inputs
        UpperCamelCase : List[Any] = seq_length if not self.use_past else 1
        UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_
        )
        UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        UpperCamelCase : List[Any] = dict(**A_ , **A_ )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape
                UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1]
                UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads
                # Per-head cache shapes for the encoder and decoder sides.
                UpperCamelCase : int = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                UpperCamelCase : List[Any] = decoder_seq_length + 3
                UpperCamelCase : str = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )
                UpperCamelCase : int = torch.cat(
                    [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1
                )
                UpperCamelCase : int = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers
                UpperCamelCase : Any = min(A_ , A_ )
                UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers
                UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
                for _ in range(A_ ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(A_ ),
                            torch.zeros(A_ ),
                            torch.zeros(A_ ),
                            torch.zeros(A_ ),
                        )
                    )
                # TODO: test this.
                UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
                for _ in range(A_ , A_ ):
                    common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) )
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        """Build dummy inputs for the causal-lm task, with an attention mask
        extended to cover the zero-filled past_key_values."""
        UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_
        )
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                UpperCamelCase : Optional[Any] = seqlen + 2
                UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers
                UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads
                UpperCamelCase : str = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype
                UpperCamelCase : int = torch.cat(
                    [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1
                )
                UpperCamelCase : Optional[Any] = [
                    (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ )
                ]
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        """Tokenize a repeated-UNK dummy sentence at an effective batch/sequence
        size (fixed dims are substituted for dynamic -1 axes)."""
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        UpperCamelCase : Optional[Any] = compute_effective_axis_dimension(
            A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ )
        UpperCamelCase : int = compute_effective_axis_dimension(
            A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_
        )
        # Generate dummy inputs according to compute batch and sequence
        UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) )
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        """Dispatch dummy-input generation on `self.task`."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_
            )
        elif self.task == "causal-lm":
            UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_
            )
        else:
            UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_
            )
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        """Flatten past_key_values: seq2seq layout for default/seq2seq-lm tasks,
        otherwise the decoder-only layout from OnnxConfigWithPast."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ )
        else:
            UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_(
                A_ , A_ , A_ , A_
            )
52
0
"""Numerical integration with the composite trapezoidal rule."""


def method_2(boundary, steps):
    """Approximate the integral of `f` over `boundary` = [a, b] with the
    composite trapezoidal rule using `steps` subintervals.

    Returns the approximation as a float.
    """
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    # Endpoints contribute with weight h/2, interior points with weight h.
    y += (h / 2.0) * f(a)
    for i in x_i:
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    """Yield the interior sample points a+h, a+2h, ..., b-h.

    The comparison uses half a step of slack so floating-point rounding
    cannot drop the last interior point (the original `x < b - h` did).
    """
    x = a + h
    while x < b - h / 2.0:
        yield x
        x = x + h


def f(x):  # enter your function here
    """Integrand: f(x) = x**2."""
    y = (x - 0) * (x - 0)
    return y


def main():
    """Integrate f over [0, 1] with 10 steps and print the result."""
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f"""y = {y}""")


if __name__ == "__main__":
    main()
150
"""Collection of elementary number-theory helpers (primes, gcd, lcm, ...)."""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff `number` is prime. `number` must be an int >= 0."""
    assert isinstance(number, int) and (number >= 0), "'number' must been an int and positive"
    status = True
    # 0 and 1 are none primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then sets 'status'
        # of false and break up the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int):
    """Sieve of Eratosthenes: return the primes in [2, n]. Requires n > 2."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    # begin_list: contains all natural numbers from 2 up to N
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returns.
    # actual sieve of erathostenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filters actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int):
    """Return the primes in [2, n] by trial division. Requires n > 2."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to N+1
    # if a number is prime then appends to list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int):
    """Return the prime factorization of `number` as a list (e.g. 12 -> [2, 2, 3])."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"
    ans = []  # this list will be returns of the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number == 0 or number == 1:
        ans.append(number)
    # if 'number' not prime then builds the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and (quotient % factor == 0):
                ans.append(factor)
                quotient /= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int):
    """Return the largest prime factor of `number` (>= 0)."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int):
    """Return the smallest prime factor of `number` (>= 0)."""
    assert isinstance(number, int) and (number >= 0), "'number' bust been an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """Return True iff `number` is even."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 == 0, bool), "compare bust been from type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """Return True iff `number` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    assert isinstance(number % 2 != 0, bool), "compare bust been from type bool"
    return number % 2 != 0


def goldbach(number: int):
    """Goldbach: return two primes summing to `number` (even, > 2)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"
    ans = []  # this list will returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variable for while-loops.
    i = 0
    j = None
    # exit variable. for break up the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Euclidean algorithm: greatest common divisor of two non-negative ints."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and (number1 >= 0), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple of two positive ints, via prime factorizations."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."
    ans = 1  # actual answer that will be return.
    # for kgV (x,1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captured numbers int both 'primeFac1' and 'primeFac2'
    # iterates through primeFac1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through primeFac2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and (ans >= 0), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime, zero-indexed (get_prime(0) == 2)."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then
        # runs to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int):
    """Return the primes strictly between two primes p_number_1 < p_number_2."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returns.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    # 'ans' contains not 'pNumber1' and 'pNumber2' !
    return ans


def get_divisors(n: int):
    """Return all positive divisors of n (including 1 and n). Requires n >= 1."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """Return True iff `number` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (number > 1), "'number' must been an int and >= 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"
    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int):
    """Reduce numerator/denominator by their gcd; returns a (num, den) tuple."""
    assert (
        isinstance(numerator, int) and isinstance(denominator, int) and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Return n! for n >= 0."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"
    ans = 1  # this will be return.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """Return the n-th Fibonacci number (fib(1) == fib(2) == 1)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"
    fib_1 = 0
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
52
0
"""Convert a decimal integer (given as int or string) to its binary string."""


def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative `decimal` as a string.

    Base cases 0 and 1 return the digit itself; otherwise recurse on the
    quotient and append the remainder bit.
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Return `number` formatted as a Python-style binary literal.

    Accepts an optional leading minus sign.

    Raises:
        ValueError: on empty input or non-integer input.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"""{negative}0b{binary_recursive(int(number))}"""


if __name__ == "__main__":
    from doctest import testmod

    testmod()
324
"""Check that every config class docstring mentions a valid checkpoint link."""
import inspect
import re

from transformers.utils import direct_transformers_import

# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name whose docstring link matches it, or None."""
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-exempt config class without a checkpoint."""
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
52
0
"""Convert a PyTorch BERT checkpoint to a TensorFlow 1.x checkpoint."""
import argparse
import os

import numpy as np
import tensorflow as tf
import torch

from transformers import BertModel


def convert_pytorch_checkpoint_to_tf(model, ckpt_dir: str, model_name: str):
    """Write `model`'s weights as a TF checkpoint `<model_name>.ckpt` in `ckpt_dir`.

    Variable names are remapped to the original TF-BERT naming scheme and
    dense/attention weight matrices are transposed (torch stores them
    transposed relative to TF kernels).
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    # (pytorch substring, tf replacement) pairs applied in order.
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        # Apply each rename rule, then prefix with the `bert/` scope.
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor, name, session):
        # Create a zero-initialized TF variable matching the torch tensor.
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            # Round-trip check: the TF variable must now equal the torch tensor.
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    """CLI entry point: load the PyTorch model and convert it to TF."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)


if __name__ == "__main__":
    main()
193
from __future__ import annotations from functools import lru_cache from math import ceil __lowerCamelCase : str = 100 __lowerCamelCase : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) __lowerCamelCase : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A_ ( _lowerCAmelCase ) -> set[int]: if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} UpperCamelCase : set[int] = set() UpperCamelCase : int UpperCamelCase : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A_ ( _lowerCAmelCase = 5000 ) -> int | None: for number_to_partition in range(1 , _lowerCAmelCase ): if len(partition(_lowerCAmelCase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f"""{solution() = }""")
52
0
A : List[Any] = 0 # The first color of the flag. A : int = 1 # The second color of the flag. A : Union[str, Any] = 2 # The third color of the flag. A : str = (red, white, blue) def UpperCamelCase ( __magic_name__ : Optional[Any] ) -> list: """simple docstring""" if not sequence: return [] if len(_lowerCAmelCase ) == 1: return list(_lowerCAmelCase ) lowercase__ = 0 lowercase__ = len(_lowerCAmelCase ) - 1 lowercase__ = 0 while mid <= high: if sequence[mid] == colors[0]: lowercase__ = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: lowercase__ = sequence[high], sequence[mid] high -= 1 else: lowercase__ = f'''The elements inside the sequence must contains only {colors} values''' raise ValueError(_lowerCAmelCase ) return sequence if __name__ == "__main__": import doctest doctest.testmod() A : Optional[Any] = input('Enter numbers separated by commas:\n').strip() A : Optional[int] = [int(item.strip()) for item in user_input.split(',')] print(F'{dutch_national_flag_sort(unsorted)}')
305
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative `decimal` as a string.

    Base cases 0 and 1 return the digit itself; otherwise recurse on the
    quotient and append the remainder bit.
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Return `number` formatted as a Python-style binary literal.

    Accepts an optional leading minus sign.

    Raises:
        ValueError: on empty input or non-integer input.
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"""{negative}0b{binary_recursive(int(number))}"""


if __name__ == "__main__":
    from doctest import testmod

    testmod()
52
0
import numpy as np from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey def a ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): '''simple docstring''' # prepare kernel # the kernel size have to be odd if (ksize % 2) == 0: lowercase__ = ksize + 1 lowercase__ = np.zeros((ksize, ksize) , dtype=np.floataa ) # each value for y in range(_lowerCAmelCase ): for x in range(_lowerCAmelCase ): # distance from center lowercase__ = x - ksize // 2 lowercase__ = y - ksize // 2 # degree to radiant lowercase__ = theta / 180 * np.pi lowercase__ = np.cos(_theta ) lowercase__ = np.sin(_theta ) # get kernel x lowercase__ = cos_theta * px + sin_theta * py # get kernel y lowercase__ = -sin_theta * px + cos_theta * py # fill kernel lowercase__ = np.exp( -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi ) return gabor if __name__ == "__main__": import doctest doctest.testmod() # read original image A__ : Union[str, Any] = imread('../image_data/lena.jpg') # turn image in gray scale value A__ : Optional[Any] = cvtColor(img, COLOR_BGR2GRAY) # Apply multiple Kernel to detect edges A__ : Optional[Any] = np.zeros(gray.shape[:2]) for theta in [0, 30, 60, 90, 1_20, 1_50]: A__ : Optional[Any] = gabor_filter_kernel(10, 8, theta, 10, 0, 0) out += filteraD(gray, CV_8UC3, kernel_aa) A__ : Tuple = out / out.max() * 2_55 A__ : Optional[Any] = out.astype(np.uinta) imshow('Original', gray) imshow('Gabor filter with 20x20 mask and 6 directions', out) waitKey(0)
207
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : Union[str, Any] = use_input_mask UpperCamelCase : Tuple = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : str = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : str = type_vocab_size UpperCamelCase : Optional[int] = type_sequence_label_size UpperCamelCase : Dict = initializer_range UpperCamelCase : int = num_labels UpperCamelCase : Optional[int] = scope UpperCamelCase : int = range_bbox def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) 
UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase : Union[str, Any] = bbox[i, j, 3] UpperCamelCase : int = bbox[i, j, 1] UpperCamelCase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase : List[str] = bbox[i, j, 2] UpperCamelCase : Optional[int] = bbox[i, j, 0] UpperCamelCase : Optional[Any] = t UpperCamelCase : Dict = None if self.use_input_mask: UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase : str = None if self.use_token_type_ids: UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Dict = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCamelCase( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = LiltModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ ) UpperCamelCase : 
Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ ) UpperCamelCase : Any = model(A_ , bbox=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = self.num_labels UpperCamelCase : Dict = LiltForTokenClassification(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[str] = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Tuple = config_and_inputs UpperCamelCase : Tuple = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': LiltModel, 
'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Dict = False _UpperCAmelCase :Union[str, Any] = False def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' return True def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = LiltModelTester(self ) UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase : Union[str, Any] = type self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Dict = LiltModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @slow class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(A_ ) UpperCamelCase : Tuple = torch.tensor([[1, 2]] , device=A_ ) UpperCamelCase : List[str] = 
torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[int] = model(input_ids=A_ , bbox=A_ ) UpperCamelCase : List[str] = torch.Size([1, 2, 768] ) UpperCamelCase : Any = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=A_ , ) self.assertTrue(outputs.last_hidden_state.shape , A_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
52
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer UpperCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCAmelCase__ = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... 
).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class a ( __snake_case ): _snake_case : Union[PIL.Image.Image, np.ndarray] class a ( __snake_case ): def __init__( self : Tuple , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : int , __lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , ): super().__init__() self.register_modules( prior=A_ , image_encoder=A_ , image_processor=A_ , scheduler=A_ , renderer=A_ , ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Dict , __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[Any] ): if latents is None: _UpperCAmelCase = randn_tensor(A_ , generator=A_ , device=A_ , dtype=A_ ) else: if latents.shape != shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) _UpperCAmelCase = latents.to(A_ ) _UpperCAmelCase = latents * scheduler.init_noise_sigma return latents def lowerCAmelCase_ ( self : Optional[Any] , __lowerCAmelCase : str=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) _UpperCAmelCase = torch.device(f'''cuda:{gpu_id}''' ) _UpperCAmelCase = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(A_ , A_ ) @property def lowerCAmelCase_ ( self : Any ): if self.device != torch.device("""meta""" ) or not hasattr(self.image_encoder , """_hf_hook""" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(A_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def lowerCAmelCase_ ( self : int , __lowerCAmelCase : int , __lowerCAmelCase : List[str] , __lowerCAmelCase : 
str , __lowerCAmelCase : Union[str, Any] , ): if isinstance(A_ , A_ ) and isinstance(image[0] , torch.Tensor ): _UpperCAmelCase = torch.cat(A_ , axis=0 ) if image[0].ndim == 4 else torch.stack(A_ , axis=0 ) if not isinstance(A_ , torch.Tensor ): _UpperCAmelCase = self.image_processor(A_ , return_tensors="""pt""" ).pixel_values[0].unsqueeze(0 ) _UpperCAmelCase = image.to(dtype=self.image_encoder.dtype , device=A_ ) _UpperCAmelCase = self.image_encoder(A_ )["last_hidden_state"] _UpperCAmelCase = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 _UpperCAmelCase = image_embeds.repeat_interleave(A_ , dim=0 ) if do_classifier_free_guidance: _UpperCAmelCase = torch.zeros_like(A_ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes _UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(A_ ) def __call__( self : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : str = 1 , __lowerCAmelCase : str = 25 , __lowerCAmelCase : Dict = None , __lowerCAmelCase : List[str] = None , __lowerCAmelCase : Tuple = 4.0 , __lowerCAmelCase : Any = 64 , __lowerCAmelCase : Optional[Any] = "pil" , __lowerCAmelCase : Union[str, Any] = True , ): if isinstance(A_ , PIL.Image.Image ): _UpperCAmelCase = 1 elif isinstance(A_ , torch.Tensor ): _UpperCAmelCase = image.shape[0] elif isinstance(A_ , A_ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): _UpperCAmelCase = len(A_ ) else: raise ValueError( f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(A_ )}''' ) _UpperCAmelCase = self._execution_device _UpperCAmelCase = batch_size * num_images_per_prompt _UpperCAmelCase = guidance_scale > 1.0 _UpperCAmelCase = self._encode_image(A_ , A_ , A_ , A_ ) # prior self.scheduler.set_timesteps(A_ 
, device=A_ ) _UpperCAmelCase = self.scheduler.timesteps _UpperCAmelCase = self.prior.config.num_embeddings _UpperCAmelCase = self.prior.config.embedding_dim _UpperCAmelCase = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , A_ , A_ , A_ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim _UpperCAmelCase = latents.reshape(latents.shape[0] , A_ , A_ ) for i, t in enumerate(self.progress_bar(A_ ) ): # expand the latents if we are doing classifier free guidance _UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents _UpperCAmelCase = self.scheduler.scale_model_input(A_ , A_ ) _UpperCAmelCase = self.prior( A_ , timestep=A_ , proj_embedding=A_ , ).predicted_image_embedding # remove the variance _UpperCAmelCase = noise_pred.split( scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: _UpperCAmelCase = noise_pred.chunk(2 ) _UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) _UpperCAmelCase = self.scheduler.step( A_ , timestep=A_ , sample=A_ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=A_ ) _UpperCAmelCase = [] for i, latent in enumerate(A_ ): print() _UpperCAmelCase = self.renderer.decode( latent[None, :] , A_ , size=A_ , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , ) images.append(A_ ) _UpperCAmelCase = torch.stack(A_ ) if output_type not in ["np", "pil"]: raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' ) _UpperCAmelCase = images.cpu().numpy() if output_type == "pil": _UpperCAmelCase = [self.numpy_to_pil(A_ ) for image in images] # Offload last model to CPU if hasattr(self , """final_offload_hook""" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if 
not return_dict: return (images,) return ShapEPipelineOutput(images=A_ )
289
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __lowerCamelCase : Union[str, Any] = pytest.mark.integration @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} ) return dset def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() UpperCamelCase : List[Any] = dset.map( lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ ) UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # 
Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch UpperCamelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} UpperCamelCase : Optional[Any] = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=A_ ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' import faiss 
UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[Any] = 1 UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ ) self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1] UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ ) self.assertRaises(A_ , index.search_batch , queries[0] ) UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores] UpperCamelCase : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(A_ ): UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dict = faiss.IndexFlat(5 ) UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=A_ ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : str = 
FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: index.save(tmp_file.name ) UpperCamelCase : int = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase : str = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : int = 1 UpperCamelCase , UpperCamelCase : Dict = index.search(A_ ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def A_ ( _lowerCAmelCase ) -> Optional[int]: import faiss UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) UpperCamelCase : List[Any] = "index.faiss" UpperCamelCase : List[str] = F"""mock://{index_name}""" index.save(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[int] = 1 UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = Elasticsearch() UpperCamelCase : Union[str, Any] = {"acknowledged": True} UpperCamelCase : 
Union[str, Any] = ElasticSearchIndex(es_client=A_ ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query UpperCamelCase : str = "foo" UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout UpperCamelCase : Dict = "foo" UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries UpperCamelCase : Dict = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ ) UpperCamelCase : str = [scores[0] for scores in total_scores] UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ ) # batched queries with timeout UpperCamelCase : int = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , request_timeout=30 ) UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores] UpperCamelCase : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ )
52
0
'''simple docstring''' import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class _a ( __snake_case , unittest.TestCase ): '''simple docstring''' A : int = KandinskyVaaControlnetPipeline A : Dict = ['image_embeds', 'negative_image_embeds', 'hint'] A : Tuple = ['image_embeds', 'negative_image_embeds', 'hint'] A : str = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] A : Optional[int] = False @property def UpperCamelCase_ ( self ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self ): '''simple docstring''' return 32 @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.time_input_dim @property def UpperCamelCase_ ( self ): '''simple docstring''' return self.time_input_dim * 4 @property def UpperCamelCase_ ( self ): '''simple docstring''' return 100 @property def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Optional[int] = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, 
"encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } SCREAMING_SNAKE_CASE : Dict = UNetaDConditionModel(**A_ ) return model @property def UpperCamelCase_ ( self ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def UpperCamelCase_ ( self ): '''simple docstring''' torch.manual_seed(0 ) SCREAMING_SNAKE_CASE : Any = VQModel(**self.dummy_movq_kwargs ) return model def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = self.dummy_unet SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_movq SCREAMING_SNAKE_CASE : Optional[int] = DDIMScheduler( num_train_timesteps=1_000, beta_schedule='linear', beta_start=0.0_00_85, beta_end=0.0_12, clip_sample=A_, set_alpha_to_one=A_, steps_offset=1, prediction_type='epsilon', thresholding=A_, ) SCREAMING_SNAKE_CASE : List[Any] = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def UpperCamelCase_ ( self, A, A=0 ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(A_ ) ).to(A_ ) SCREAMING_SNAKE_CASE : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to( A_ ) # create hint SCREAMING_SNAKE_CASE : List[str] = floats_tensor((1, 3, 64, 64), rng=random.Random(A_ ) ).to(A_ ) if str(A_ ).startswith('mps' ): SCREAMING_SNAKE_CASE : int = torch.manual_seed(A_ ) else: SCREAMING_SNAKE_CASE : Union[str, Any] = 
torch.Generator(device=A_ ).manual_seed(A_ ) SCREAMING_SNAKE_CASE : Dict = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = "cpu" SCREAMING_SNAKE_CASE : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE : int = self.pipeline_class(**A_ ) SCREAMING_SNAKE_CASE : Any = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) SCREAMING_SNAKE_CASE : int = pipe(**self.get_dummy_inputs(A_ ) ) SCREAMING_SNAKE_CASE : Tuple = output.images SCREAMING_SNAKE_CASE : List[str] = pipe( **self.get_dummy_inputs(A_ ), return_dict=A_, )[0] SCREAMING_SNAKE_CASE : Tuple = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) SCREAMING_SNAKE_CASE : str = np.array( [0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @slow @require_torch_gpu class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' ) SCREAMING_SNAKE_CASE : Union[str, Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' 
'/kandinskyv22/hint_image_cat.png' ) SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(np.array(A_ ) ).float() / 255.0 SCREAMING_SNAKE_CASE : Tuple = hint.permute(2, 0, 1 ).unsqueeze(0 ) SCREAMING_SNAKE_CASE : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa ) pipe_prior.to(A_ ) SCREAMING_SNAKE_CASE : Optional[int] = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE : Tuple = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) SCREAMING_SNAKE_CASE : str = "A robot, 4k photo" SCREAMING_SNAKE_CASE : Optional[Any] = torch.Generator(device='cuda' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : Dict = pipe_prior( A_, generator=A_, num_inference_steps=5, negative_prompt='', ).to_tuple() SCREAMING_SNAKE_CASE : int = torch.Generator(device='cuda' ).manual_seed(0 ) SCREAMING_SNAKE_CASE : str = pipeline( image_embeds=A_, negative_image_embeds=A_, hint=A_, generator=A_, num_inference_steps=100, output_type='np', ) SCREAMING_SNAKE_CASE : Union[str, Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(A_, A_ )
251
def A_ ( _lowerCAmelCase = 50 ) -> int: UpperCamelCase : List[Any] = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f"""{solution() = }""")
52
0
'''simple docstring'''
# NOTE(review): this module is a mangled copy of the `datasets` "wiki_split"
# metric (SARI + sacrebleu + exact match).  An obfuscating rename pass
# replaced every assignment target with `UpperCAmelCase`, every function name
# with `__lowerCamelCase`, and most call arguments with `_lowerCAmelCase`.
# Consequences, flagged inline below:
#   * duplicate parameter names (`_lowercase , _lowercase`) are a SyntaxError,
#     so the module does not even parse;
#   * every function shadows the previous one, while call sites still use the
#     original names (`normalize_answer`, `SARIngram`, `compute_sari`, ...);
#   * reads refer to the original variable names, which are never bound.
# Comments describe the *intended* behaviour, inferred from the surviving
# call sites — verify against the original wiki_split.py before relying on it.

import re
import string
from collections import Counter

import sacrebleu
import sacremoses
from packaging import version

import datasets


# Intended to be `_CITATION` (the Metric class below reads that name).
# NOTE(review): all three constants bind the same name `a`.
a : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """
# Intended to be `_DESCRIPTION`.
a : Optional[int] = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """
# Intended to be `_KWARGS_DESCRIPTION`.
a : str = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """


def __lowerCamelCase ( _lowercase ) -> str:
    '''Intended `normalize_answer`: lowercase, strip punctuation/articles, collapse whitespace.'''
    def remove_articles(_lowercase ):
        # NOTE(review): compiles the pattern but passes the mangled name to re.sub.
        UpperCAmelCase : Tuple = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
        return re.sub(_lowerCAmelCase , """ """ , _lowerCAmelCase )
    def white_space_fix(_lowercase ):
        return " ".join(text.split() )
    def remove_punc(_lowercase ):
        UpperCAmelCase : int = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )
    def lower(_lowercase ):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) )


def __lowerCamelCase ( _lowercase , _lowercase ) -> Any:
    '''Intended `compute_exact`: 1 if normalized prediction == normalized reference, else 0.'''
    # NOTE(review): duplicate parameter name above is a SyntaxError.
    return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) )


def __lowerCamelCase ( _lowercase , _lowercase ) -> str:
    '''Intended `compute_em`: percentage of predictions matching any of their references exactly.'''
    UpperCAmelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )]
    return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 1_0_0


def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Union[str, Any]:
    '''Intended `SARIngram(sgrams, cgrams, rgramslist, numref)`: per-n-gram keep/delete/add F-scores.'''
    UpperCAmelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams]
    UpperCAmelCase : Union[str, Any] = Counter(_lowerCAmelCase )
    UpperCAmelCase : Optional[int] = Counter(_lowerCAmelCase )
    UpperCAmelCase : List[Any] = Counter()
    # Replicate source n-gram counts once per reference so counts are comparable.
    for sgram, scount in sgramcounter.items():
        UpperCAmelCase : Tuple = scount * numref
    UpperCAmelCase : Union[str, Any] = Counter(_lowerCAmelCase )
    UpperCAmelCase : Tuple = Counter()
    for cgram, ccount in cgramcounter.items():
        UpperCAmelCase : Dict = ccount * numref
    # KEEP: n-grams kept from source that the references also keep.
    UpperCAmelCase : List[Any] = sgramcounter_rep & cgramcounter_rep
    UpperCAmelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter
    UpperCAmelCase : Dict = sgramcounter_rep & rgramcounter
    UpperCAmelCase : Optional[int] = 0
    UpperCAmelCase : Tuple = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscorea += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    UpperCAmelCase : Any = 1
    UpperCAmelCase : Any = 1
    if len(_lowerCAmelCase ) > 0:
        UpperCAmelCase : Dict = keeptmpscorea / len(_lowerCAmelCase )
    if len(_lowerCAmelCase ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        UpperCAmelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() )
    UpperCAmelCase : Any = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        # Harmonic mean (F1) of keep precision and recall.
        UpperCAmelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION: source n-grams the candidate dropped, scored against references.
    UpperCAmelCase : Any = sgramcounter_rep - cgramcounter_rep
    UpperCAmelCase : str = delgramcounter_rep - rgramcounter
    UpperCAmelCase : Any = sgramcounter_rep - rgramcounter
    UpperCAmelCase : Optional[int] = 0
    UpperCAmelCase : Union[str, Any] = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    UpperCAmelCase : Dict = 1
    if len(_lowerCAmelCase ) > 0:
        UpperCAmelCase : str = deltmpscorea / len(_lowerCAmelCase )
    # ADDITION: n-grams the candidate introduced, scored against references.
    UpperCAmelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
    UpperCAmelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase )
    UpperCAmelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase )
    UpperCAmelCase : Optional[Any] = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    UpperCAmelCase : Tuple = 1
    UpperCAmelCase : Tuple = 1
    if len(_lowerCAmelCase ) > 0:
        UpperCAmelCase : Dict = addtmpscore / len(_lowerCAmelCase )
    if len(_lowerCAmelCase ) > 0:
        UpperCAmelCase : Tuple = addtmpscore / len(_lowerCAmelCase )
    UpperCAmelCase : List[str] = 0
    if addscore_precision > 0 or addscore_recall > 0:
        UpperCAmelCase : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)


def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
    '''Intended `SARIsent(ssent, csent, rsents)`: sentence-level SARI, averaging 1..4-gram scores.'''
    UpperCAmelCase : int = len(_lowerCAmelCase )
    UpperCAmelCase : Optional[Any] = ssent.split(""" """ )
    UpperCAmelCase : Dict = csent.split(""" """ )
    # Accumulators for 1/2/3/4-gram lists of source, candidate and references.
    UpperCAmelCase : str = []
    UpperCAmelCase : Any = []
    UpperCAmelCase : Any = []
    UpperCAmelCase : Union[str, Any] = []
    UpperCAmelCase : str = []
    UpperCAmelCase : str = []
    UpperCAmelCase : Dict = []
    UpperCAmelCase : int = []
    UpperCAmelCase : Optional[Any] = []
    UpperCAmelCase : Tuple = []
    for rsent in rsents:
        UpperCAmelCase : List[Any] = rsent.split(""" """ )
        UpperCAmelCase : List[str] = []
        UpperCAmelCase : int = []
        UpperCAmelCase : Tuple = []
        ragramslist.append(_lowerCAmelCase )
        # Build reference 2/3/4-grams by sliding windows over the tokens.
        for i in range(0 , len(_lowerCAmelCase ) - 1 ):
            if i < len(_lowerCAmelCase ) - 1:
                UpperCAmelCase : Dict = ragrams[i] + " " + ragrams[i + 1]
                ragrams.append(_lowerCAmelCase )
            if i < len(_lowerCAmelCase ) - 2:
                UpperCAmelCase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
                ragrams.append(_lowerCAmelCase )
            if i < len(_lowerCAmelCase ) - 3:
                UpperCAmelCase : List[Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
                ragrams.append(_lowerCAmelCase )
        ragramslist.append(_lowerCAmelCase )
        ragramslist.append(_lowerCAmelCase )
        ragramslist.append(_lowerCAmelCase )
    # Source n-grams.
    for i in range(0 , len(_lowerCAmelCase ) - 1 ):
        if i < len(_lowerCAmelCase ) - 1:
            UpperCAmelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1]
            sagrams.append(_lowerCAmelCase )
        if i < len(_lowerCAmelCase ) - 2:
            UpperCAmelCase : List[str] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
            sagrams.append(_lowerCAmelCase )
        if i < len(_lowerCAmelCase ) - 3:
            UpperCAmelCase : Optional[int] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
            sagrams.append(_lowerCAmelCase )
    # Candidate n-grams.
    for i in range(0 , len(_lowerCAmelCase ) - 1 ):
        if i < len(_lowerCAmelCase ) - 1:
            UpperCAmelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1]
            cagrams.append(_lowerCAmelCase )
        if i < len(_lowerCAmelCase ) - 2:
            UpperCAmelCase : Optional[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
            cagrams.append(_lowerCAmelCase )
        if i < len(_lowerCAmelCase ) - 3:
            UpperCAmelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
            cagrams.append(_lowerCAmelCase )
    # NOTE(review): `(name) : annotation = ...` is an illegal annotation target;
    # these were originally 3-tuple unpackings of SARIngram's result per n-gram order.
    (UpperCAmelCase) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    (UpperCAmelCase) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    (UpperCAmelCase) : str = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    (UpperCAmelCase) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
    # Average each component over the four n-gram orders, then average the
    # three components into the final sentence score.
    UpperCAmelCase : Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
    UpperCAmelCase : str = sum([delascore, delascore, delascore, delascore] ) / 4
    UpperCAmelCase : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4
    UpperCAmelCase : Union[str, Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


def __lowerCamelCase ( _lowercase , _lowercase = True , _lowercase = "13a" , _lowercase = True ) -> Optional[Any]:
    '''Intended `normalize(sentence, lowercase=True, tokenizer="13a", return_str=True)`.'''
    # Normalization is required for the ASSET dataset (one of the primary
    # datasets in sentence simplification) to allow using space
    # to split the sentence. Even though Wiki-Auto and TURK datasets,
    # do not require normalization, we do it for consistency.
    # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
    # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7
    if lowercase:
        UpperCAmelCase : Dict = sentence.lower()
    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizers under metrics.bleu.
        if version.parse(sacrebleu.__version__ ).major >= 2:
            UpperCAmelCase : str = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase )
        else:
            UpperCAmelCase : Dict = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase )
    elif tokenizer == "moses":
        UpperCAmelCase : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase , escape=_lowerCAmelCase )
    elif tokenizer == "penn":
        UpperCAmelCase : str = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase )
    else:
        UpperCAmelCase : Union[str, Any] = sentence
    if not return_str:
        UpperCAmelCase : Tuple = normalized_sent.split()
    return normalized_sent


def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]:
    '''Intended `compute_sari(sources, predictions, references)`: corpus SARI in [0, 100].'''
    if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )):
        raise ValueError("""Sources length must match predictions and references lengths.""" )
    UpperCAmelCase : Optional[Any] = 0
    for src, pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
        sari_score += SARIsent(normalize(_lowerCAmelCase ) , normalize(_lowerCAmelCase ) , [normalize(_lowerCAmelCase ) for sent in refs] )
    UpperCAmelCase : Optional[int] = sari_score / len(_lowerCAmelCase )
    return 1_0_0 * sari_score


def __lowerCamelCase ( _lowercase , _lowercase , _lowercase="exp" , _lowercase=None , _lowercase=False , _lowercase=False , _lowercase=False , ) -> List[str]:
    '''Intended `compute_sacrebleu(predictions, references, ...)`: corpus BLEU via sacrebleu.'''
    UpperCAmelCase : Optional[Any] = len(references[0] )
    # Every prediction must have the same number of references.
    if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ):
        raise ValueError("""Sacrebleu requires the same number of references for each prediction""" )
    # Transpose references: sacrebleu wants one list per reference position.
    UpperCAmelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )]
    UpperCAmelCase : Tuple = sacrebleu.corpus_bleu(
        _lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , )
    return output.score


# NOTE(review): _DESCRIPTION / _KWARGS_DESCRIPTION / _CITATION are never
# defined in this mangled copy (the constants above are all bound to `a`).
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase_ ( datasets.Metric ):
    def _lowercase( self ) -> str:
        '''Intended `_info`: declare string prediction/reference features and reference URLs.'''
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""" , id="""sequence""" ),
                    """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ),
                } ) ,
            codebase_urls=[
                """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
                """https://github.com/cocoxu/simplification/blob/master/SARI.py""",
                """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
                """https://github.com/mjpost/sacreBLEU""",
            ] ,
            reference_urls=[
                """https://www.aclweb.org/anthology/Q16-1029.pdf""",
                """https://github.com/mjpost/sacreBLEU""",
                """https://en.wikipedia.org/wiki/BLEU""",
                """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
            ] , )

    def _lowercase( self , A , A , A ) -> Dict:
        '''Intended `_compute(sources, predictions, references)`: dict with sari/sacrebleu/exact.'''
        # NOTE(review): duplicate parameter name `A` is a SyntaxError, and the
        # body reads `result` / passes `A_`, neither of which is bound here.
        UpperCAmelCase : Optional[Any] = {}
        result.update({"""sari""": compute_sari(sources=A_ , predictions=A_ , references=A_ )} )
        result.update({"""sacrebleu""": compute_sacrebleu(predictions=A_ , references=A_ )} )
        result.update({"""exact""": compute_em(predictions=A_ , references=A_ )} )
        return result
265
def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : List[Any] = "" for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def A_ ( _lowerCAmelCase ) -> dict[str, str]: UpperCamelCase : Optional[Any] = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key UpperCamelCase : Tuple = remove_duplicates(key.upper() ) UpperCamelCase : int = len(_lowerCAmelCase ) # First fill cipher with key characters UpperCamelCase : int = {alphabet[i]: char for i, char in enumerate(_lowerCAmelCase )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(_lowerCAmelCase ) , 26 ): UpperCamelCase : Optional[Any] = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 UpperCamelCase : List[str] = alphabet[i - offset] UpperCamelCase : List[Any] = char return cipher_alphabet def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: return "".join(cipher_map.get(_lowerCAmelCase , _lowerCAmelCase ) for ch in message.upper() ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Union[str, Any] = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(_lowerCAmelCase , _lowerCAmelCase ) for ch in message.upper() ) def A_ ( ) -> None: UpperCamelCase : int = input("Enter message to encode or decode: " ).strip() UpperCamelCase : str = input("Enter keyword: " ).strip() UpperCamelCase : Union[str, Any] = input("Encipher or decipher? E/D:" ).strip()[0].lower() try: UpperCamelCase : List[str] = {"e": encipher, "d": decipher}[option] except KeyError: raise KeyError("invalid input option" ) UpperCamelCase : str = create_cipher_map(_lowerCAmelCase ) print(func(_lowerCAmelCase , _lowerCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
52
0
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def lowercase ( a__ : int ) -> int: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def lowercase ( ) -> int: with parallel_backend('''spark''' ): assert ParallelBackendConfig.backend_name == "spark" _UpperCamelCase = [1, 2, 3] with pytest.raises(_lowerCAmelCase ): with parallel_backend('''unsupported backend''' ): map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=2 ) with pytest.raises(_lowerCAmelCase ): with parallel_backend('''unsupported backend''' ): map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize('''num_proc''' , [2, -1] ) def lowercase ( a__ : Union[str, Any] ) -> List[Any]: _UpperCamelCase = [1, 2] _UpperCamelCase = {"a": 1, "b": 2} _UpperCamelCase = {"a": [1, 2], "b": [3, 4]} _UpperCamelCase = {"a": {"1": 1}, "b": 2} _UpperCamelCase = {"a": 1, "b": 2, "c": 3, "d": 4} _UpperCamelCase = [2, 3] _UpperCamelCase = {"a": 2, "b": 3} _UpperCamelCase = {"a": [2, 3], "b": [4, 5]} _UpperCamelCase = {"a": {"1": 2}, "b": 3} _UpperCamelCase = {"a": 2, "b": 3, "c": 4, "d": 5} with parallel_backend('''spark''' ): assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa assert map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) == expected_map_nested_sa
256
# FIX: `fa_score` does not exist in sklearn.metrics — the obfuscation pass
# renamed `f1_score` in both the import and the call site, which makes the
# module fail at import time.  Restored to the real symbol.
from sklearn.metrics import f1_score

import datasets


# FIX: all three description constants were bound to the same mangled name
# (`__lowerCamelCase`), while the decorator and `_info` below reference
# `_DESCRIPTION` / `_KWARGS_DESCRIPTION` / `_CITATION` — restored the names
# the class actually uses.  String contents are unchanged.
_DESCRIPTION = """ The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation: F1 = 2 * (precision * recall) / (precision + recall) """

_KWARGS_DESCRIPTION = """ Args: predictions (`list` of `int`): Predicted labels. references (`list` of `int`): Ground truth labels. labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None. pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1. average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`. - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary. - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives. - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall. - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification). sample_weight (`list` of `float`): Sample weights Defaults to None. Returns: f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better. Examples: Example 1-A simple binary example >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0]) >>> print(results) {'f1': 0.5} Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0) >>> print(round(results['f1'], 2)) 0.67 Example 3-The same simple binary example as in Example 1, but with `sample_weight` included. >>> f1_metric = datasets.load_metric(\"f1\") >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3]) >>> print(round(results['f1'], 2)) 0.35 Example 4-A multiclass example, with different values for the `average` input. >>> predictions = [0, 2, 1, 0, 0, 1] >>> references = [0, 1, 2, 0, 1, 2] >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\") >>> print(round(results['f1'], 2)) 0.33 >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\") >>> print(round(results['f1'], 2)) 0.27 >>> results = f1_metric.compute(predictions=predictions, references=references, average=None) >>> print(results) {'f1': array([0.8, 0. , 0. ])} """

_CITATION = """ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } """


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A__(datasets.Metric):
    """F1 metric backed by `sklearn.metrics.f1_score`."""

    # FIX: both methods were renamed to `__UpperCamelCase` (the second
    # shadowing the first), so `datasets.Metric.compute()` could never find
    # `_info` / `_compute`; the mangled `_compute` signature also repeated the
    # parameter name `A_` six times, which is a SyntaxError.  Restored to the
    # names and parameters the `datasets.Metric` contract requires.
    def _info(self):
        """Declare the metric's input features and reference URLs."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    # The "multilabel" configuration takes one label sequence
                    # per example; every other configuration takes one int.
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        """Compute F1; returns a scalar float, or a per-label array when `average=None`."""
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight
        )
        return {"f1": float(score) if score.size == 1 else score}
52
0
"""simple docstring""" import pytest import requests from datasets.utils.file_utils import http_head from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline @pytest.mark.integration def A__ ( ): with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ): with pytest.raises(_lowerCAmelCase ): requests.request("GET" , "https://huggingface.co" ) with pytest.raises(requests.exceptions.ConnectTimeout ): requests.request("GET" , "https://huggingface.co" , timeout=1.0 ) @pytest.mark.integration def A__ ( ): with offline(OfflineSimulationMode.CONNECTION_FAILS ): with pytest.raises(requests.exceptions.ConnectionError ): requests.request("GET" , "https://huggingface.co" ) def A__ ( ): with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ): with pytest.raises(_lowerCAmelCase ): http_head("https://huggingface.co" )
292
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( __snake_case , unittest.TestCase ): _UpperCAmelCase :List[str] = KandinskyInpaintPipeline _UpperCAmelCase :List[str] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image'] _UpperCAmelCase :Dict = [ 'prompt', 'negative_prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] _UpperCAmelCase :Optional[int] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'negative_prompt', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] _UpperCAmelCase :int = False @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return 32 @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim @property def __UpperCamelCase( self ): '''simple docstring''' return self.time_input_dim * 4 @property def __UpperCamelCase( self ): '''simple docstring''' return 100 @property def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" ) return tokenizer @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = MCLIPConfig( numDims=self.cross_attention_dim , 
transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , ) UpperCamelCase : Optional[int] = MultilingualCLIP(A_ ) UpperCamelCase : Union[str, Any] = text_encoder.eval() return text_encoder @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : Optional[int] = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "text_image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "text_image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } UpperCamelCase : List[Any] = UNetaDConditionModel(**A_ ) return model @property def __UpperCamelCase( self ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __UpperCamelCase( self ): '''simple docstring''' torch.manual_seed(0 ) UpperCamelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.dummy_text_encoder UpperCamelCase : str = self.dummy_tokenizer UpperCamelCase : List[Any] = self.dummy_unet UpperCamelCase : Optional[Any] = 
self.dummy_movq UpperCamelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , ) UpperCamelCase : Optional[Any] = { "text_encoder": text_encoder, "tokenizer": tokenizer, "unet": unet, "scheduler": scheduler, "movq": movq, } return components def __UpperCamelCase( self , A_ , A_=0 ): '''simple docstring''' UpperCamelCase : Dict = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ ) # create init_image UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ ) UpperCamelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCamelCase : List[Any] = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((256, 256) ) # create mask UpperCamelCase : str = np.ones((64, 64) , dtype=np.floataa ) UpperCamelCase : str = 0 if str(A_ ).startswith("mps" ): UpperCamelCase : int = torch.manual_seed(A_ ) else: UpperCamelCase : Tuple = torch.Generator(device=A_ ).manual_seed(A_ ) UpperCamelCase : Union[str, Any] = { "prompt": "horse", "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = "cpu" UpperCamelCase : Tuple = self.get_dummy_components() UpperCamelCase : str = self.pipeline_class(**A_ ) UpperCamelCase : Tuple = pipe.to(A_ ) pipe.set_progress_bar_config(disable=A_ ) UpperCamelCase : Any = pipe(**self.get_dummy_inputs(A_ ) ) UpperCamelCase : List[Any] = output.images UpperCamelCase : List[Any] = pipe( **self.get_dummy_inputs(A_ ) , 
return_dict=A_ , )[0] UpperCamelCase : List[Any] = image[0, -3:, -3:, -1] UpperCamelCase : Any = image_from_tuple[0, -3:, -3:, -1] print(F"""image.shape {image.shape}""" ) assert image.shape == (1, 64, 64, 3) UpperCamelCase : Union[str, Any] = np.array( [0.8_32_69_19, 0.73_79_04_67, 0.20_91_85_81, 0.9_30_96_12, 0.5_51_17_91, 0.43_71_33_28, 0.5_51_33_21, 0.49_92_29_34, 0.59_49_77_86] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" def __UpperCamelCase( self ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" ) UpperCamelCase : List[str] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) UpperCamelCase : Dict = np.ones((768, 768) , dtype=np.floataa ) UpperCamelCase : str = 0 UpperCamelCase : List[Any] = "a hat" UpperCamelCase : Tuple = KandinskyPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa ) pipe_prior.to(A_ ) UpperCamelCase : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa ) UpperCamelCase : Optional[Any] = pipeline.to(A_ ) pipeline.set_progress_bar_config(disable=A_ ) UpperCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCamelCase 
, UpperCamelCase : Optional[Any] = pipe_prior( A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCamelCase : Dict = pipeline( A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , height=768 , width=768 , output_type="np" , ) UpperCamelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(A_ , A_ )
52
0
def __UpperCAmelCase ( a_: str ) -> str:
    """Convert a string of binary digits to its octal representation.

    Args:
        a_: the binary number as a string of ``0``/``1`` characters.

    Returns:
        The octal number as a string, one digit per 3-bit group; leading
        zero groups are preserved (e.g. ``"000011"`` -> ``"03"``).

    Raises:
        ValueError: if the string is empty or contains non-binary characters.
    """
    # NOTE(review): the original body referenced ``bin_string`` and
    # ``_lowerCAmelCase`` while the parameter was named ``a_``, so every call
    # failed with NameError; bind the parameter to a local and use it
    # consistently. The ``Optional[int]`` hint was also wrong for a string.
    bin_string = a_
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the length is an exact multiple of 3 bits.
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    # Split into consecutive 3-bit groups, most significant first.
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        # Each 3-bit group maps to exactly one octal digit (0-7).
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index)) * int(val)
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
145
class A__ :
    """Disjoint-set union (union by rank, path compression) that also tracks
    the element count of every set and the size of the largest set so far.

    NOTE(review): reconstructed from a mangled original whose ``self.*``
    assignment targets had been replaced with throwaway locals (making the
    class raise NameError) and whose ``get_parent`` discarded the recursive
    result, returning the immediate parent instead of the root.
    """

    def __init__(self, set_counts):
        """set_counts: initial number of elements in each singleton set
        (sets are indexed ``0 .. len(set_counts) - 1``)."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src, dst):
        """Union the sets containing ``src`` and ``dst``.

        Returns False when they are already in the same set, True otherwise.
        The higher-ranked root absorbs the other; ``set_counts`` is moved to
        the surviving root and ``max_set`` is updated.
        """
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            # dst's root wins: it absorbs src's elements.
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            # Equal ranks: the surviving tree grew one level deeper.
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            # src's root wins.
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set):
        """Return the root of ``disj_set``, compressing the path on the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set  # already a root
        # Path compression: point directly at the root for future queries.
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
52
0
"""simple docstring""" import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def lowerCAmelCase__ ( ) -> List[Any]: """simple docstring""" snake_case = argparse.ArgumentParser() parser.add_argument( '-m' , '--pretrained_model_name_or_path' , type=_lowerCAmelCase , default=_lowerCAmelCase , required=_lowerCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , ) parser.add_argument( '-c' , '--caption' , type=_lowerCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , ) parser.add_argument( '-n' , '--images_num' , type=_lowerCAmelCase , default=4 , help='How much images to generate.' , ) parser.add_argument( '-s' , '--seed' , type=_lowerCAmelCase , default=4_2 , help='Seed for random process.' , ) parser.add_argument( '-ci' , '--cuda_id' , type=_lowerCAmelCase , default=0 , help='cuda_id.' , ) snake_case = parser.parse_args() return args def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Union[str, Any]: """simple docstring""" if not len(_lowerCAmelCase ) == rows * cols: raise ValueError('The specified number of rows and columns are not correct.' 
) snake_case = imgs[0].size snake_case = Image.new('RGB' , size=(cols * w, rows * h) ) snake_case = grid.size for i, img in enumerate(_lowerCAmelCase ): grid.paste(_lowerCAmelCase , box=(i % cols * w, i // cols * h) ) return grid def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : int="robotic cat with wings" , _UpperCamelCase : str=7.5 , _UpperCamelCase : Any=5_0 , _UpperCamelCase : Tuple=1 , _UpperCamelCase : Union[str, Any]=4_2 , ) -> Dict: """simple docstring""" snake_case = torch.Generator(pipeline.device ).manual_seed(_lowerCAmelCase ) snake_case = pipeline( _lowerCAmelCase , guidance_scale=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase , generator=_lowerCAmelCase , num_images_per_prompt=_lowerCAmelCase , ).images snake_case = int(math.sqrt(_lowerCAmelCase ) ) snake_case = image_grid(_lowerCAmelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images SCREAMING_SNAKE_CASE__ = parse_args() # Load models and create wrapper for stable diffusion SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer") SCREAMING_SNAKE_CASE__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder") SCREAMING_SNAKE_CASE__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae") SCREAMING_SNAKE_CASE__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet") SCREAMING_SNAKE_CASE__ = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) SCREAMING_SNAKE_CASE__ = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")): SCREAMING_SNAKE_CASE__ = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, "unet", unet) else: SCREAMING_SNAKE_CASE__ = unet.to(torch.device("cuda", args.cuda_id)) 
SCREAMING_SNAKE_CASE__ = pipeline.to(unet.device) SCREAMING_SNAKE_CASE__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split())))) SCREAMING_SNAKE_CASE__ = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
150
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __lowerCamelCase : Any = { """configuration_electra""": ["""ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ElectraConfig""", """ElectraOnnxConfig"""], """tokenization_electra""": ["""ElectraTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Dict = ["""ElectraTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : Tuple = [ """ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """ElectraForCausalLM""", """ElectraForMaskedLM""", """ElectraForMultipleChoice""", """ElectraForPreTraining""", """ElectraForQuestionAnswering""", """ElectraForSequenceClassification""", """ElectraForTokenClassification""", """ElectraModel""", """ElectraPreTrainedModel""", """load_tf_weights_in_electra""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[Any] = [ """TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFElectraForMaskedLM""", """TFElectraForMultipleChoice""", """TFElectraForPreTraining""", """TFElectraForQuestionAnswering""", """TFElectraForSequenceClassification""", """TFElectraForTokenClassification""", """TFElectraModel""", """TFElectraPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCamelCase : List[str] = [ """FlaxElectraForCausalLM""", """FlaxElectraForMaskedLM""", """FlaxElectraForMultipleChoice""", """FlaxElectraForPreTraining""", """FlaxElectraForQuestionAnswering""", """FlaxElectraForSequenceClassification""", """FlaxElectraForTokenClassification""", """FlaxElectraModel""", 
"""FlaxElectraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys __lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
0
"""Count inversions in a sequence: pairs (i, j) with i < j and arr[i] > arr[j].

NOTE(review): in the mangled original all four functions were named ``a__``
(each shadowing the last) while their own call sites referenced
``count_inversions_bf`` / ``count_inversions_recursive`` /
``_count_cross_inversions`` / ``main`` — names that no longer existed.
The names required by those call sites are restored here.
"""


def count_inversions_bf(arr):
    """Count inversions by brute force; O(n^2) reference implementation."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Return ``(sorted_arr, num_inversions)`` via merge-sort; O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    sorted_p, inversions_p = count_inversions_recursive(p)
    sorted_q, inversions_q = count_inversions_recursive(q)
    merged, cross_inversions = _count_cross_inversions(sorted_p, sorted_q)
    return merged, inversions_p + inversions_q + cross_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists, counting pairs p[i] > q[j] along the way."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # P is sorted, so if P[i] > Q[j] then P[k] > Q[j] for all k >= i:
            # every remaining element of P forms an inversion with Q[j].
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1
    # Append whichever side still has elements left.
    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """Demo: check both implementations agree on a few representative inputs."""
    arr_a = [10, 2, 1, 5, 5, 2, 11]
    # This array has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # A sorted array has zero inversions.
    arr_a.sort()
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # An empty list should also have zero inversions.
    arr_a = []
    num_inversions_bf = count_inversions_bf(arr_a)
    _, num_inversions_recursive = count_inversions_recursive(arr_a)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
324
# NOTE(review): this fragment has been through an identifier mangler: the
# class bases ``__snake_case`` are undefined, every ``__init__`` parameter is
# named ``A_`` (duplicate parameter names are a SyntaxError in Python), and
# the ``self.*`` assignment targets were rewritten to throwaway locals while
# the right-hand sides still use the original parameter names. Presumably
# this was transformers' ConvNextV2Config (PretrainedConfig +
# BackboneConfigMixin) — restore from upstream rather than patching in place.
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


# Module logger; immediately shadowed by the URL map below (same mangled name).
__lowerCamelCase : str = logging.get_logger(__name__)

# Pretrained checkpoint name -> hosted config URL.
__lowerCamelCase : str = {
    """facebook/convnextv2-tiny-1k-224""": """https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json""",
}


class A__ ( __snake_case , __snake_case ):
    # Model-type key used by the transformers auto classes.
    _UpperCAmelCase :Optional[int] = 'convnextv2'

    def __init__( self , A_=3 , A_=4 , A_=4 , A_=None , A_=None , A_="gelu" , A_=0.02 , A_=1e-12 , A_=0.0 , A_=224 , A_=None , A_=None , **A_ , ):
        '''Configuration for a ConvNeXt-V2 backbone.

        NOTE(review): judging from the right-hand sides below, the parameters
        were presumably (num_channels, patch_size, num_stages, hidden_sizes,
        depths, hidden_act, initializer_range, layer_norm_eps, drop_path_rate,
        image_size, out_features, out_indices) — TODO confirm against
        upstream and restore the real names.
        '''
        super().__init__(**A_ )
        # Every assignment below should target ``self.<attr>``; the mangler
        # replaced the targets with throwaway locals, so as written this
        # raises NameError and sets no attributes.
        UpperCamelCase : Dict = num_channels
        UpperCamelCase : Union[str, Any] = patch_size
        UpperCamelCase : Union[str, Any] = num_stages
        # Default stage widths / depths correspond to the "tiny" layout.
        UpperCamelCase : List[Any] = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        UpperCamelCase : List[str] = [3, 3, 9, 3] if depths is None else depths
        UpperCamelCase : Dict = hidden_act
        UpperCamelCase : Union[str, Any] = initializer_range
        UpperCamelCase : Tuple = layer_norm_eps
        UpperCamelCase : str = drop_path_rate
        UpperCamelCase : List[str] = image_size
        # Stage names: "stem" plus one entry per configured depth.
        UpperCamelCase : List[str] = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
        UpperCamelCase , UpperCamelCase : str = get_aligned_output_features_output_indices(
            out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
52
0
# NOTE(review): all three module constants below share the mangled name
# ``a__`` — each assignment shadows the previous one, and the second one
# references ``INSTALL_CONTENT`` which no longer exists under that name
# (it is presumably the first constant). Restore the original names
# (INSTALL_CONTENT, notebook_first_cells, black_avoid_patterns or similar).

# Snippet injected at the top of auto-generated notebooks: installs
# transformers (+ datasets) before any other cell runs.
a__: str = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

# First notebook cell: the installation snippet above.
a__: Tuple = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
# Placeholder -> dummy-class substitutions used when rendering doc templates.
a__: List[Any] = {
    """{processor_class}""": """FakeProcessorClass""",
    """{model_class}""": """FakeModelClass""",
    """{object_class}""": """FakeObjectClass""",
}
193
# Integration tests for the datasets offline-simulation helpers.
# NOTE(review): this is a duplicate of the fragment earlier in this dump,
# with the functions mangled to the shared name ``A_`` — each ``def``
# shadows the previous, so only the last survives at module level.
import pytest
import requests
from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def A_ ( ) -> List[Any]:
    # Simulated hang: ``_lowerCAmelCase`` is undefined — presumably
    # RequestWouldHangIndefinitelyError (imported above); TODO confirm.
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(_lowerCAmelCase ):
            requests.request("GET" , "https://huggingface.co" )
        # An explicit timeout converts the simulated hang into ConnectTimeout.
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("GET" , "https://huggingface.co" , timeout=1.0 )


@pytest.mark.integration
def A_ ( ) -> Tuple:
    # Simulated refusal: any request raises ConnectionError.
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("GET" , "https://huggingface.co" )


def A_ ( ) -> Optional[int]:
    # HF_DATASETS_OFFLINE=1: the library's http helpers must refuse to go
    # online. ``_lowerCAmelCase`` is undefined here as well — TODO confirm
    # the intended exception type.
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(_lowerCAmelCase ):
            http_head("https://huggingface.co" )
52
0
"""Breadth-first search: shortest path and shortest-path distance.

NOTE(review): reconstructed from a mangled original in which both functions
were named ``UpperCamelCase`` (with duplicate ``__magic_name__`` parameters —
a SyntaxError) while the demo calls at the bottom referenced
``bfs_shortest_path`` / ``bfs_shortest_path_distance`` / ``demo_graph``.
The names required by those call sites are restored here.
"""

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Return the shortest path from ``start`` to ``goal`` as a list of
    nodes, or ``[]`` when ``goal`` is unreachable.

    BFS explores whole paths level by level, so the first path that reaches
    ``goal`` is a shortest one.
    """
    explored = set()
    # Queue of partial paths still to be extended.
    # (list.pop(0) is O(n); collections.deque.popleft() would be O(1).)
    queue = [[start]]
    if start == goal:
        return [start]
    while queue:
        path = queue.pop(0)
        node = path[-1]
        if node not in explored:
            for neighbour in graph[node]:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                if neighbour == goal:
                    return new_path
            explored.add(node)
    # No path between the two nodes.
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from ``start`` to
    ``target``, 0 when they coincide, and -1 when either node is missing
    or ``target`` is unreachable.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Distances from `start`; -1 marks "target not reached yet".
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
305
# Lazy import shim for the MMBT model: heavy submodules are only imported on
# first attribute access via _LazyModule.
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# NOTE(review): this dict was presumably named ``_import_structure`` — the
# _LazyModule call at the bottom references that name, which is otherwise
# undefined here; the mangler renamed both assignments to __lowerCamelCase.
__lowerCamelCase : Optional[int] = {"""configuration_mmbt""": ["""MMBTConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration class.
    pass
else:
    # NOTE(review): should presumably extend _import_structure
    # (e.g. _import_structure["modeling_mmbt"] = [...]) rather than rebind.
    __lowerCamelCase : List[Any] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]

if TYPE_CHECKING:
    # Static type checkers see the real imports directly.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # At runtime, replace this module with a lazy proxy.
    __lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
52
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) A__ : List[str] = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Union[str, Any] = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Dict = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Any = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from 
.tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys A__ : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
207
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets __lowerCamelCase : List[Any] = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ __lowerCamelCase : Optional[int] = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ __lowerCamelCase : str = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. 
Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def A_ ( _lowerCAmelCase ) -> str: def remove_articles(_lowerCAmelCase ): UpperCamelCase : Tuple = re.compile(r"\b(a|an|the)\b" , re.UNICODE ) return re.sub(_lowerCAmelCase , " " , _lowerCAmelCase ) def white_space_fix(_lowerCAmelCase ): return " ".join(text.split() ) def remove_punc(_lowerCAmelCase ): UpperCamelCase : int = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_lowerCAmelCase ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowerCAmelCase ) ) ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> Any: return int(normalize_answer(_lowerCAmelCase ) == normalize_answer(_lowerCAmelCase ) ) def A_ ( _lowerCAmelCase , _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = [any(compute_exact(_lowerCAmelCase , _lowerCAmelCase ) for ref in refs ) for pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase )] return (sum(_lowerCAmelCase ) / len(_lowerCAmelCase )) * 100 def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> Union[str, Any]: UpperCamelCase : Union[str, Any] = [rgram for rgrams in rgramslist for rgram in rgrams] UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Optional[int] = Counter(_lowerCAmelCase ) UpperCamelCase : List[Any] = Counter() for sgram, scount in sgramcounter.items(): UpperCamelCase : Tuple = scount * numref UpperCamelCase : Union[str, Any] = Counter(_lowerCAmelCase ) UpperCamelCase : Tuple = Counter() for cgram, ccount in 
cgramcounter.items(): UpperCamelCase : Dict = ccount * numref # KEEP UpperCamelCase : List[Any] = sgramcounter_rep & cgramcounter_rep UpperCamelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter UpperCamelCase : Dict = sgramcounter_rep & rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Tuple = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Any = 1 UpperCamelCase : Any = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = keeptmpscorea / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) UpperCamelCase : Union[str, Any] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) UpperCamelCase : Any = 0 if keepscore_precision > 0 or keepscore_recall > 0: UpperCamelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION UpperCamelCase : Any = sgramcounter_rep - cgramcounter_rep UpperCamelCase : str = delgramcounter_rep - rgramcounter UpperCamelCase : Any = sgramcounter_rep - rgramcounter UpperCamelCase : Optional[int] = 0 UpperCamelCase : Union[str, Any] = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
UpperCamelCase : Dict = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : str = deltmpscorea / len(_lowerCAmelCase ) # ADDITION UpperCamelCase : List[str] = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : List[str] = set(_lowerCAmelCase ) & set(_lowerCAmelCase ) UpperCamelCase : Dict = set(_lowerCAmelCase ) - set(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. UpperCamelCase : Tuple = 1 UpperCamelCase : Tuple = 1 if len(_lowerCAmelCase ) > 0: UpperCamelCase : Dict = addtmpscore / len(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: UpperCamelCase : Tuple = addtmpscore / len(_lowerCAmelCase ) UpperCamelCase : List[str] = 0 if addscore_precision > 0 or addscore_recall > 0: UpperCamelCase : List[str] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: UpperCamelCase : int = len(_lowerCAmelCase ) UpperCamelCase : Optional[Any] = ssent.split(" " ) UpperCamelCase : Dict = csent.split(" " ) UpperCamelCase : str = [] UpperCamelCase : Any = [] UpperCamelCase : Any = [] UpperCamelCase : Union[str, Any] = [] UpperCamelCase : str = [] UpperCamelCase : str = [] UpperCamelCase : Dict = [] UpperCamelCase : int = [] UpperCamelCase : Optional[Any] = [] UpperCamelCase : Tuple = [] for rsent in rsents: UpperCamelCase : List[Any] = rsent.split(" " ) UpperCamelCase : List[str] = [] UpperCamelCase : int = [] UpperCamelCase : Tuple = [] ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Dict = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] 
ragrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : List[Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3] ragrams.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) ragramslist.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : List[str] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] sagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Optional[int] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3] sagrams.append(_lowerCAmelCase ) for i in range(0 , len(_lowerCAmelCase ) - 1 ): if i < len(_lowerCAmelCase ) - 1: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 2: UpperCamelCase : Optional[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] cagrams.append(_lowerCAmelCase ) if i < len(_lowerCAmelCase ) - 3: UpperCamelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3] cagrams.append(_lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[Any] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : str = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ((UpperCamelCase) , (UpperCamelCase) , (UpperCamelCase)) : Optional[int] = SARIngram(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) UpperCamelCase : 
Tuple = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 UpperCamelCase : str = sum([delascore, delascore, delascore, delascore] ) / 4 UpperCamelCase : Union[str, Any] = sum([addascore, addascore, addascore, addascore] ) / 4 UpperCamelCase : Union[str, Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def A_ ( _lowerCAmelCase , _lowerCAmelCase = True , _lowerCAmelCase = "13a" , _lowerCAmelCase = True ) -> Optional[Any]: # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: UpperCamelCase : Dict = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: UpperCamelCase : str = sacrebleu.metrics.bleu._get_tokenizer(_lowerCAmelCase )()(_lowerCAmelCase ) else: UpperCamelCase : Dict = sacrebleu.TOKENIZERS[tokenizer]()(_lowerCAmelCase ) elif tokenizer == "moses": UpperCamelCase : Union[str, Any] = sacremoses.MosesTokenizer().tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase , escape=_lowerCAmelCase ) elif tokenizer == "penn": UpperCamelCase : str = sacremoses.MosesTokenizer().penn_tokenize(_lowerCAmelCase , return_str=_lowerCAmelCase ) else: UpperCamelCase : Union[str, Any] = sentence if not return_str: UpperCamelCase : Tuple = normalized_sent.split() return normalized_sent def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> List[str]: if not (len(_lowerCAmelCase ) == len(_lowerCAmelCase ) == len(_lowerCAmelCase )): raise ValueError("Sources length must match predictions and references lengths." 
) UpperCamelCase : Optional[Any] = 0 for src, pred, refs in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): sari_score += SARIsent(normalize(_lowerCAmelCase ) , normalize(_lowerCAmelCase ) , [normalize(_lowerCAmelCase ) for sent in refs] ) UpperCamelCase : Optional[int] = sari_score / len(_lowerCAmelCase ) return 100 * sari_score def A_ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="exp" , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=False , ) -> List[str]: UpperCamelCase : Optional[Any] = len(references[0] ) if any(len(_lowerCAmelCase ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) UpperCamelCase : Optional[int] = [[refs[i] for refs in references] for i in range(_lowerCAmelCase )] UpperCamelCase : Tuple = sacrebleu.corpus_bleu( _lowerCAmelCase , _lowerCAmelCase , smooth_method=_lowerCAmelCase , smooth_value=_lowerCAmelCase , force=_lowerCAmelCase , lowercase=_lowerCAmelCase , use_effective_order=_lowerCAmelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class A__ ( datasets.Metric ): def __UpperCamelCase( self ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=[ "https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py", "https://github.com/cocoxu/simplification/blob/master/SARI.py", "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py", "https://github.com/mjpost/sacreBLEU", ] , reference_urls=[ "https://www.aclweb.org/anthology/Q16-1029.pdf", 
"https://github.com/mjpost/sacreBLEU", "https://en.wikipedia.org/wiki/BLEU", "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213", ] , ) def __UpperCamelCase( self , A_ , A_ , A_ ): '''simple docstring''' UpperCamelCase : Optional[Any] = {} result.update({"sari": compute_sari(sources=A_ , predictions=A_ , references=A_ )} ) result.update({"sacrebleu": compute_sacrebleu(predictions=A_ , references=A_ )} ) result.update({"exact": compute_em(predictions=A_ , references=A_ )} ) return result
52
0
"""simple docstring""" def __UpperCAmelCase ( lowercase ,lowercase ): """simple docstring""" if not (isinstance(_lowerCAmelCase ,_lowerCAmelCase ) and isinstance(_lowerCAmelCase ,_lowerCAmelCase )): raise ValueError("""longest_common_substring() takes two strings for inputs""" ) _UpperCAmelCase = len(_lowerCAmelCase ) _UpperCAmelCase = len(_lowerCAmelCase ) _UpperCAmelCase = [[0] * (texta_length + 1) for _ in range(texta_length + 1 )] _UpperCAmelCase = 0 _UpperCAmelCase = 0 for i in range(1 ,texta_length + 1 ): for j in range(1 ,texta_length + 1 ): if texta[i - 1] == texta[j - 1]: _UpperCAmelCase = 1 + dp[i - 1][j - 1] if dp[i][j] > ans_length: _UpperCAmelCase = i _UpperCAmelCase = dp[i][j] return texta[ans_index - ans_length : ans_index] if __name__ == "__main__": import doctest doctest.testmod()
289
"""RoBERTa model configuration and its ONNX export configuration."""
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): the logger and this map were both bound to the same mangled
# name (the second assignment clobbered the first); restored distinct,
# conventional names.
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "roberta-base": "https://huggingface.co/roberta-base/resolve/main/config.json",
    "roberta-large": "https://huggingface.co/roberta-large/resolve/main/config.json",
    "roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/config.json",
    "distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/config.json",
    "roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json",
    "roberta-large-openai-detector": "https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json",
}


class A__(PretrainedConfig):
    """Configuration for a RoBERTa model.

    NOTE(review): in the original, every ``__init__`` parameter was declared
    as ``A_`` (duplicate argument name — a SyntaxError) and every attribute
    assignment was mangled to a bare annotated local, so nothing was stored
    on ``self``.  Restored real parameter names (order and defaults match
    the mangled signature) and the ``self.<attr> = <param>`` assignments the
    body's attribute names imply.
    """

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store model hyperparameters; special-token ids go to the base class."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


# NOTE(review): the original gave this class the same mangled name as the
# config class above, so the second definition shadowed the first at module
# level; kept as-is to preserve the module's bindings, but these two classes
# should be given distinct names (e.g. RobertaConfig / RobertaOnnxConfig).
class A__(OnnxConfig):
    """ONNX export configuration for RoBERTa."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis mapping for the exported model's inputs.

        Multiple-choice tasks carry an extra 'choice' axis between batch and
        sequence; all other tasks use (batch, sequence).
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
52
0
'''simple docstring''' import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging UpperCamelCase_ = logging.get_logger(__name__) def lowercase__( __UpperCamelCase: List[str] ,__UpperCamelCase: Optional[int] ): """simple docstring""" SCREAMING_SNAKE_CASE : Any = set() SCREAMING_SNAKE_CASE : Union[str, Any] = [] def parse_line(__UpperCamelCase: Optional[Any] ): for line in fp: if isinstance(_lowerCAmelCase ,_lowerCAmelCase ): SCREAMING_SNAKE_CASE : Dict = line.decode('UTF-8' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(' ' ): # process a single warning and move it to `selected_warnings`. if len(_lowerCAmelCase ) > 0: SCREAMING_SNAKE_CASE : Dict = "\n".join(_lowerCAmelCase ) # Only keep the warnings specified in `targets` if any(f": {x}: " in warning for x in targets ): selected_warnings.add(_lowerCAmelCase ) buffer.clear() continue else: SCREAMING_SNAKE_CASE : List[Any] = line.strip() buffer.append(_lowerCAmelCase ) if from_gh: for filename in os.listdir(_lowerCAmelCase ): SCREAMING_SNAKE_CASE : Optional[Any] = os.path.join(_lowerCAmelCase ,_lowerCAmelCase ) if not os.path.isdir(_lowerCAmelCase ): # read the file if filename != "warnings.txt": continue with open(_lowerCAmelCase ) as fp: parse_line(_lowerCAmelCase ) else: try: with zipfile.ZipFile(_lowerCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(_lowerCAmelCase ): # read the file if filename != "warnings.txt": continue with z.open(_lowerCAmelCase ) as fp: parse_line(_lowerCAmelCase ) except Exception: logger.warning( f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." 
) return selected_warnings def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Union[str, Any] ): """simple docstring""" SCREAMING_SNAKE_CASE : List[str] = set() SCREAMING_SNAKE_CASE : Dict = [os.path.join(_lowerCAmelCase ,_lowerCAmelCase ) for p in os.listdir(_lowerCAmelCase ) if (p.endswith('.zip' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(_lowerCAmelCase ,_lowerCAmelCase ) ) return selected_warnings if __name__ == "__main__": def lowercase__( __UpperCamelCase: Optional[int] ): """simple docstring""" return values.split(',' ) UpperCamelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.") parser.add_argument( "--output_dir", type=str, required=True, help="Where to store the downloaded artifacts and other result files.", ) parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.") # optional parameters parser.add_argument( "--targets", default="DeprecationWarning,UserWarning,FutureWarning", type=list_str, help="Comma-separated list of target warning(s) which we want to extract.", ) parser.add_argument( "--from_gh", action="store_true", help="If running from a GitHub action workflow and collecting warnings from its artifacts.", ) UpperCamelCase_ = parser.parse_args() UpperCamelCase_ = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links UpperCamelCase_ = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("=" * 8_0) download_artifact(name, url, args.output_dir, args.token) # Be gentle 
to GitHub time.sleep(1) # extract warnings from artifacts UpperCamelCase_ = extract_warnings(args.output_dir, args.targets) UpperCamelCase_ = sorted(selected_warnings) with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
251
"""Unconditional audio-generation diffusion pipeline."""
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


# NOTE(review): the body below calls ``logger.info`` but the original bound
# the logger to a mangled throwaway name — a NameError; restored ``logger``.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class A__(DiffusionPipeline):
    """Pipeline that denoises random noise into raw audio with a 1-D UNet.

    NOTE(review): the original ``__call__`` declared all five parameters as
    ``A_`` (duplicate argument name — a SyntaxError) and mangled every local
    binding, leaving ``original_sample_size`` and ``audio`` undefined at
    their use sites.  The unmangled names survive inside the f-strings,
    which pins this restoration.
    """

    def __init__(self, unet, scheduler):
        """Register the denoising UNet and the noise scheduler."""
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate ``batch_size`` audio clips.

        Args:
            batch_size: number of clips to generate.
            num_inference_steps: number of denoising steps.
            generator: torch generator(s) for reproducible noise.
            audio_length_in_s: requested clip length in seconds; defaults to
                the UNet's native sample length.
            return_dict: return an ``AudioPipelineOutput`` instead of a tuple.

        Raises:
            ValueError: if the requested length is too short for the UNet's
                downsampling stack, or the generator list length mismatches
                ``batch_size``.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet halves the temporal resolution once per up-block, so the
        # sample length must comfortably cover the full downsampling stack.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
                f""" {3 * down_scale_factor / self.unet.config.sample_rate}."""
            )

        # Remember the requested length; generation may run on a padded
        # length, and the output is trimmed back at the end.
        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
                f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"""You have passed a list of generators of length {len(generator)}, but requested an effective batch"""
                f""" size of {batch_size}. Make sure the batch size matches the length of the generators."""
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample
            # 2. compute previous image: x_t -> t_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim any padding added to satisfy the downsampling constraint.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
52
0
"""Lazy-import scaffolding for the Perceiver model family.

NOTE(review): as in the other mangled ``__init__`` chunks, the structure
dict and the optional-backend export lists were bound to throwaway names
while ``_LazyModule`` was called with undefined ``_import_structure`` — a
NameError at import time; restored the conventional registration pattern.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


# Submodules that are importable regardless of which backends are installed.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime _LazyModule
    # resolves submodules on first attribute access instead.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module object with a lazy proxy so heavy backends are
    # only imported when one of their symbols is actually requested.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
265
import functools


def A_(worda: str, wordb: str) -> int:
    """Return the Levenshtein (edit) distance between ``worda`` and ``wordb``:
    the minimum number of single-character insertions, deletions and
    substitutions that turn one into the other.

    NOTE(review): the original declared both parameters with the same
    placeholder name (a SyntaxError) and used ``worda``/``indexa`` for both
    operands inside the recursion; restored distinct names to match the
    intended delete/insert/substitute recurrence.
    """
    len_worda = len(worda)
    len_wordb = len(wordb)

    # Memoized top-down DP over the pair of indices; cache keys are ints so
    # functools.cache is safe here.
    @functools.cache
    def min_distance(indexa: int, indexb: int) -> int:
        # First word exhausted: insert every remaining char of the second.
        if indexa >= len_worda:
            return len_wordb - indexb
        # Second word exhausted: delete every remaining char of the first.
        if indexb >= len_wordb:
            return len_worda - indexa
        # 0 when current characters match, 1 when a substitution is needed.
        diff = int(worda[indexa] != wordb[indexb])
        return min(
            1 + min_distance(indexa + 1, indexb),         # delete from worda
            1 + min_distance(indexa, indexb + 1),         # insert into worda
            diff + min_distance(indexa + 1, indexb + 1),  # match / substitute
        )

    return min_distance(0, 0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
52
0
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( RobertaSeriesConfig, RobertaSeriesModelWithTransformation, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCAmelCase_ ( __snake_case , __snake_case , __snake_case , unittest.TestCase): snake_case__ = AltDiffusionPipeline snake_case__ = TEXT_TO_IMAGE_PARAMS snake_case__ = TEXT_TO_IMAGE_BATCH_PARAMS snake_case__ = TEXT_TO_IMAGE_IMAGE_PARAMS snake_case__ = TEXT_TO_IMAGE_IMAGE_PARAMS def _UpperCamelCase ( self : int ) -> List[str]: torch.manual_seed(0 ) _UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) _UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=A_ , set_alpha_to_one=A_ , ) torch.manual_seed(0 ) _UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) # TODO: address the non-deterministic text encoder (fails for save-load tests) # torch.manual_seed(0) # text_encoder_config = 
RobertaSeriesConfig( # hidden_size=32, # project_dim=32, # intermediate_size=37, # layer_norm_eps=1e-05, # num_attention_heads=4, # num_hidden_layers=5, # vocab_size=5002, # ) # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config) torch.manual_seed(0 ) _UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , ) _UpperCamelCase = CLIPTextModel(A_ ) _UpperCamelCase = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' ) _UpperCamelCase = 77 _UpperCamelCase = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Dict=0 ) -> str: if str(A_ ).startswith('''mps''' ): _UpperCamelCase = torch.manual_seed(A_ ) else: _UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ ) _UpperCamelCase = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def _UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 ) def _UpperCamelCase ( self : Tuple ) -> List[str]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _UpperCamelCase ( self : Any ) -> Optional[Any]: _UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() torch.manual_seed(0 ) _UpperCamelCase = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic 
text encoder _UpperCamelCase = RobertaSeriesModelWithTransformation(A_ ) _UpperCamelCase = text_encoder _UpperCamelCase = AltDiffusionPipeline(**A_ ) _UpperCamelCase = alt_pipe.to(A_ ) alt_pipe.set_progress_bar_config(disable=A_ ) _UpperCamelCase = self.get_dummy_inputs(A_ ) _UpperCamelCase = "A photo of an astronaut" _UpperCamelCase = alt_pipe(**A_ ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCamelCase = np.array( [0.5_7_4_8_1_6_2, 0.6_0_4_4_7_1_4_5, 0.4_8_8_2_1_2_1_7, 0.5_0_1_0_0_6_3_6, 0.5_4_3_1_1_8_5, 0.4_5_7_6_3_6_8_3, 0.4_9_6_5_7_6_9_6, 0.4_8_1_3_2_7_3_3, 0.4_7_5_7_3_0_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase ( self : List[Any] ) -> int: _UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = PNDMScheduler(skip_prk_steps=A_ ) torch.manual_seed(0 ) _UpperCamelCase = RobertaSeriesConfig( hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , ) # TODO: remove after fixing the non-deterministic text encoder _UpperCamelCase = RobertaSeriesModelWithTransformation(A_ ) _UpperCamelCase = text_encoder _UpperCamelCase = AltDiffusionPipeline(**A_ ) _UpperCamelCase = alt_pipe.to(A_ ) alt_pipe.set_progress_bar_config(disable=A_ ) _UpperCamelCase = self.get_dummy_inputs(A_ ) _UpperCamelCase = alt_pipe(**A_ ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCamelCase = np.array( [0.5_1_6_0_5_0_9_3, 0.5_7_0_7_2_4_1, 0.4_7_3_6_5_5_0_7, 0.5_0_5_7_8_8_8_6, 0.5_6_3_3_8_7_7, 0.4_6_4_2_5_0_3, 0.5_1_8_2_0_8_1, 0.4_8_7_6_3_4_8_4, 0.4_9_0_8_4_2_3_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): def _UpperCamelCase ( self : 
str ) -> List[Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self : Optional[Any] ) -> Any: _UpperCamelCase = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , safety_checker=A_ ) _UpperCamelCase = alt_pipe.to(A_ ) alt_pipe.set_progress_bar_config(disable=A_ ) _UpperCamelCase = "A painting of a squirrel eating a burger" _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = alt_pipe([prompt] , generator=A_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='''np''' ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCamelCase = np.array([0.1_0_1_0, 0.0_8_0_0, 0.0_7_9_4, 0.0_8_8_5, 0.0_8_4_3, 0.0_7_6_2, 0.0_7_6_9, 0.0_7_2_9, 0.0_5_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase ( self : Any ) -> str: _UpperCamelCase = DDIMScheduler.from_pretrained('''BAAI/AltDiffusion''' , subfolder='''scheduler''' ) _UpperCamelCase = AltDiffusionPipeline.from_pretrained('''BAAI/AltDiffusion''' , scheduler=A_ , safety_checker=A_ ) _UpperCamelCase = alt_pipe.to(A_ ) alt_pipe.set_progress_bar_config(disable=A_ ) _UpperCamelCase = "A painting of a squirrel eating a burger" _UpperCamelCase = torch.manual_seed(0 ) _UpperCamelCase = alt_pipe([prompt] , generator=A_ , num_inference_steps=2 , output_type='''numpy''' ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) _UpperCamelCase = np.array([0.4_0_1_9, 0.4_0_5_2, 0.3_8_1_0, 0.4_1_1_9, 0.3_9_1_6, 0.3_9_8_2, 0.4_6_5_1, 0.4_1_9_5, 0.5_3_2_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
256
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

# Shared RNG so fixtures are reproducible when a caller seeds it.
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random 2-D nested list of floats with the given `(rows, cols)` shape.

    Values are drawn uniformly from ``[0, scale)``; `rng` defaults to the
    module-level `global_rng`. `name` is accepted for API compatibility only.
    """
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    """Builds feature-extractor kwargs and synthetic speech inputs for the tests."""

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        # Step between successive sequence lengths so the batch spans
        # [min_seq_length, max_seq_length] in `batch_size` increments.
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        """Return the kwargs used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """Create a batch of synthetic speech inputs of equal or increasing length."""

        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    """Tests for `ASTFeatureExtractor` (Audio Spectrogram Transformer)."""

    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        """Calling with lists, arrays and 2-D arrays must give matching results."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        """Padding must down-cast float64 inputs to float32 for both np and pt tensors."""
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        """Load `num_samples` audio arrays from the dummy LibriSpeech dataset."""
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        """Extracted spectrogram must match the pinned reference values."""
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        # `assertEqual` — the `assertEquals` alias is deprecated.
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
52
0
"""simple docstring""" import inspect import os import re from transformers.configuration_utils import PretrainedConfig from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py _snake_case : Optional[Any] = """src/transformers""" # This is to make sure the transformers module imported is the one in the repo. _snake_case : List[str] = direct_transformers_import(PATH_TO_TRANSFORMERS) _snake_case : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING _snake_case : Union[str, Any] = { # used to compute the property `self.chunk_length` """EncodecConfig""": ["""overlap"""], # used as `self.bert_model = BertModel(config, ...)` """DPRConfig""": True, # not used in modeling files, but it's an important information """FSMTConfig""": ["""langs"""], # used internally in the configuration class file """GPTNeoConfig""": ["""attention_types"""], # used internally in the configuration class file """EsmConfig""": ["""is_folding_model"""], # used during training (despite we don't have training script for these models yet) """Mask2FormerConfig""": ["""ignore_value"""], # `ignore_value` used during training (despite we don't have training script for these models yet) # `norm` used in conversion script (despite not using in the modeling file) """OneFormerConfig""": ["""ignore_value""", """norm"""], # used during preprocessing and collation, see `collating_graphormer.py` """GraphormerConfig""": ["""spatial_pos_max"""], # used internally in the configuration class file """T5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file # `tokenizer_class` get default value `T5Tokenizer` intentionally """MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], """UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""], # used internally in the configuration class file 
"""LongT5Config""": ["""feed_forward_proj"""], # used internally in the configuration class file """SwitchTransformersConfig""": ["""feed_forward_proj"""], # having default values other than `1e-5` - we can't fix them without breaking """BioGptConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """GLPNConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """SegformerConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """CvtConfig""": ["""layer_norm_eps"""], # having default values other than `1e-5` - we can't fix them without breaking """PerceiverConfig""": ["""layer_norm_eps"""], # used internally to calculate the feature size """InformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate the feature size """AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""], # used internally to calculate `mlp_dim` """SamVisionConfig""": ["""mlp_ratio"""], # For (head) training, but so far not implemented """ClapAudioConfig""": ["""num_classes"""], # Not used, but providing useful information to users """SpeechT5HifiGanConfig""": ["""sampling_rate"""], } # TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure SPECIAL_CASES_TO_ALLOW.update( { 'CLIPSegConfig': True, 'DeformableDetrConfig': True, 'DetaConfig': True, 'DinatConfig': True, 'DonutSwinConfig': True, 'EfficientFormerConfig': True, 'FSMTConfig': True, 'JukeboxConfig': True, 'LayoutLMv2Config': True, 'MaskFormerSwinConfig': True, 'MT5Config': True, 'NatConfig': True, 'OneFormerConfig': True, 'PerceiverConfig': True, 'RagConfig': True, 'SpeechT5Config': True, 'SwinConfig': 
True, 'Swin2SRConfig': True, 'Swinv2Config': True, 'SwitchTransformersConfig': True, 'TableTransformerConfig': True, 'TapasConfig': True, 'TransfoXLConfig': True, 'UniSpeechConfig': True, 'UniSpeechSatConfig': True, 'WavLMConfig': True, 'WhisperConfig': True, # TODO: @Arthur (for `alignment_head` and `alignment_layer`) 'JukeboxPriorConfig': True, # TODO: @Younes (for `is_decoder`) 'Pix2StructTextConfig': True, } ) def A__ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ): A = False for attribute in attributes: for modeling_source in source_strings: # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)` if ( F"config.{attribute}" in modeling_source or F"getattr(config, \"{attribute}\"" in modeling_source or F"getattr(self.config, \"{attribute}\"" in modeling_source ): A = True # Deal with multi-line cases elif ( re.search( rF"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" , _lowerCAmelCase , ) is not None ): A = True # `SequenceSummary` is called with `SequenceSummary(config)` elif attribute in [ "summary_type", "summary_use_proj", "summary_activation", "summary_last_dropout", "summary_proj_to_labels", "summary_first_dropout", ]: if "SequenceSummary" in modeling_source: A = True if attribute_used: break if attribute_used: break # common and important attributes, even if they do not always appear in the modeling files A = [ "bos_index", "eos_index", "pad_index", "unk_index", "mask_index", "image_size", "use_cache", "out_features", "out_indices", ] A = ["encoder_no_repeat_ngram_size"] # Special cases to be allowed A = True if not attribute_used: A = False for attribute in attributes: # Allow if the default value in the configuration class is different from the one in `PretrainedConfig` if attribute in ["is_encoder_decoder"] and default_value is True: A = True elif attribute in ["tie_word_embeddings"] and default_value is False: A = True # Allow cases without 
checking the default value in the configuration class elif attribute in attributes_to_allow + attributes_used_in_generation: A = True elif attribute.endswith("_token_id" ): A = True # configuration class specific cases if not case_allowed: A = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] ) A = allowed_cases is True or attribute in allowed_cases return attribute_used or case_allowed def A__ ( UpperCamelCase ): A = dict(inspect.signature(config_class.__init__ ).parameters ) A = [x for x in list(signature.keys() ) if x not in ["self", "kwargs"]] A = [signature[param].default for param in parameter_names] # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long # as one variant is used, the test should pass A = {} if len(config_class.attribute_map ) > 0: A = {v: k for k, v in config_class.attribute_map.items()} # Get the path to modeling source files A = inspect.getsourcefile(_lowerCAmelCase ) A = os.path.dirname(_lowerCAmelCase ) # Let's check against all frameworks: as long as one framework uses an attribute, we are good. A = [os.path.join(_lowerCAmelCase , _lowerCAmelCase ) for fn in os.listdir(_lowerCAmelCase ) if fn.startswith("modeling_" )] # Get the source code strings A = [] for path in modeling_paths: if os.path.isfile(_lowerCAmelCase ): with open(_lowerCAmelCase ) as fp: modeling_sources.append(fp.read() ) A = [] for config_param, default_value in zip(_lowerCAmelCase , _lowerCAmelCase ): # `attributes` here is all the variant names for `config_param` A = [config_param] # some configuration classes have non-empty `attribute_map`, and both names could be used in the # corresponding modeling files. As long as one of them appears, it is fine. 
if config_param in reversed_attribute_map: attributes.append(reversed_attribute_map[config_param] ) if not check_attribute_being_used(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ): unused_attributes.append(attributes[0] ) return sorted(_lowerCAmelCase ) def A__ ( ): A = {} for _config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in _config_class.__module__: continue # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.) A = [ cls for name, cls in inspect.getmembers( inspect.getmodule(_config_class ) , lambda UpperCamelCase : inspect.isclass(_lowerCAmelCase ) and issubclass(_lowerCAmelCase , _lowerCAmelCase ) and inspect.getmodule(_lowerCAmelCase ) == inspect.getmodule(_config_class ) , ) ] for config_class in config_classes_in_module: A = check_config_attributes_being_used(_lowerCAmelCase ) if len(_lowerCAmelCase ) > 0: A = unused_attributes if len(_lowerCAmelCase ) > 0: A = "The following configuration classes contain unused attributes in the corresponding modeling files:\n" for name, attributes in configs_with_unused_attributes.items(): error += F"{name}: {attributes}\n" raise ValueError(_lowerCAmelCase ) if __name__ == "__main__": check_config_attributes()
292
import pickle

import numpy as np


class CNN:
    """A small from-scratch convolutional network (NumPy only): one convolution
    layer, one average/max pooling layer, and a two-layer fully-connected
    back-propagation network trained with plain gradient descent.
    """

    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [kernel_size, kernel_count, conv_step] for the conv layer
        :param size_p1: pooling window size of the first pooling layer
        :param bp_num1: unit count of the flattened BP input layer
        :param bp_num2: unit count of the BP hidden layer
        :param bp_num3: unit count of the BP output layer
        :param rate_w: learning rate for weights
        :param rate_t: learning rate for thresholds (biases)
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        # weights start uniform in [-0.5, 0.5), thresholds in [-1, 1)
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for _ in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1

    def save_model(self, save_path):
        """Pickle all hyper-parameters and learned parameters to `save_path`."""
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)
        print(f"Model saved: {save_path}")

    @classmethod
    def read_model(cls, model_path):
        """Rebuild a `CNN` instance from a file written by `save_model`.

        WARNING: unpickling executes arbitrary code — only load trusted files.
        """
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins

    def sig(self, x):
        """Element-wise sigmoid activation."""
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        """Round a prediction to three decimal places."""
        return round(x, 3)

    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        """Slide every kernel over `data` and return the flattened input slices
        plus one sigmoid-activated feature map per kernel.
        """
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[i_focus : i_focus + size_conv, j_focus : j_focus + size_conv]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = np.sum(np.multiply(data_focus[i_focus], w_convs[i_map])) - thre_convs[i_map]
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(size_feature_map, size_feature_map)
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap

    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        """Down-sample each feature map with an average or max pooling window."""
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled

    def _expand(self, data):
        """Flatten a list of matrices into a single 1-D numpy array."""
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        return np.asarray(data_expanded)

    def _expand_mat(self, data_mat):
        """Reshape a single matrix into a 1 x (rows*cols) row matrix."""
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        return data_mat.reshape(1, shapes[0] * shapes[1])

    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        """Up-sample the pooled gradients back to feature-map size and multiply by
        the sigmoid derivative of each feature map.
        """
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    # each pooled gradient is spread uniformly over its window
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[i_pool]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map])))
            pd_all.append(pd_conv2)
        return pd_all

    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=True):
        """Train with gradient descent until `n_repeat` epochs or `mse < error_accuracy`.

        Fix over the original: `draw_e` used to default to the *type* `bool`
        (always truthy); it is now an explicit boolean with the same behavior.
        Returns the final mean squared error.
        """
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # forward pass: conv -> pool -> flatten -> 2-layer BP net
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_conv1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input
                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # backward pass: error and gradients
                pd_k_all = np.multiply((data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3)))
                pd_j_all = np.multiply(np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2)))
                pd_i_all = np.dot(pd_j_all, self.vji)
                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1,
                    pd_conv1_pooled,
                    shape_featuremap1[0],
                    shape_featuremap1[1],
                    self.size_pooling1,
                )
                # weight and threshold learning process: convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_conv1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv] - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # fully connected layers
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # accumulate the absolute error of this single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            # matplotlib is imported lazily so training without plotting
            # does not require it to be installed
            from matplotlib import pyplot as plt

            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complished---------------------")
        print((" - - Training epoch: ", rp, f" - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse

    def predict(self, datas_test):
        """Run the forward pass on each test sample and return rounded outputs."""
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)
            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        """Return (feature maps, pooled maps) of `data` for inspection."""
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1


if __name__ == "__main__":
    pass
52
0
"""TensorFlow backend for the benchmark suite: measures inference/training
speed and peak memory of `transformers` TF models."""
import random
import timeit
from functools import wraps
from typing import Callable, Optional

from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
    Benchmark,
    Memory,
    MemorySummary,
    measure_peak_memory_cpu,
    start_memory_tracing,
    stop_memory_tracing,
)


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError

    from .benchmark_args_tf import TensorFlowBenchmarkArguments

if is_pyanvml_available():
    import pyanvml.pyanvml as nvml

logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: run the wrapped callable eagerly, or wrapped in a
    `tf.function` (optionally XLA-compiled), depending on the benchmark args."""

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            # XLA requires graph mode, so the two flags are mutually exclusive.
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int):
    """Build a `(batch_size, sequence_length)` int32 tensor of random token ids
    in `[0, vocab_size)`."""
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)


class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        """Version string of the TensorFlow installation being benchmarked."""
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Seconds per forward pass for `model_name` at the given input size."""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Seconds per forward+backward pass for `model_name` at the given input size."""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        """Peak memory (and optional line-by-line summary) for one forward pass."""
        if self.args.is_gpu:
            # avoid TF grabbing the whole GPU up front so usage is measurable
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        """Peak memory (and optional line-by-line summary) for one train step."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg callable running one forward pass."""
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the LM-head model and return a callable computing gradients."""
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Return the best average runtime of `func` over `args.repeat` runs of 10 calls."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 5 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")

    def _measure_memory(self, func: Callable[[], None]):
        """Return `(memory, summary)` for one call of `func`; `summary` is only
        populated when line-by-line tracing is enabled."""
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
145
# NOTE(review): this fragment is a mechanically-mangled copy of
# `transformers.models.bart.configuration_bart`.  Identifiers were rewritten
# (`A__`, `__snake_case`, `UpperCamelCase`, repeated `A_` parameters), which
# leaves it syntactically invalid (duplicate argument names) and the
# assignment targets meaningless.  The comments below describe the intended
# upstream behaviour; the original names must be restored before this runs.
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


# Module logger (upstream name: `logger`).
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)

# Checkpoint -> config URL map (upstream: `BART_PRETRAINED_CONFIG_ARCHIVE_MAP`).
# NOTE(review): reuses the mangled name above, shadowing the logger binding.
__lowerCamelCase : Any = {
    """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class A__ ( __snake_case ):  # upstream: `class BartConfig(PretrainedConfig)`
    """Configuration holder for a BART model (mangled copy of `BartConfig`)."""

    # NOTE(review): these are upstream's `model_type`, `keys_to_ignore_at_inference`
    # and `attribute_map`; all three collide on the mangled name `_UpperCAmelCase`.
    _UpperCAmelCase :Dict = 'bart'
    _UpperCAmelCase :str = ['past_key_values']
    _UpperCAmelCase :Any = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    # NOTE(review): every parameter is named `A_` — a SyntaxError (duplicate
    # argument names).  The body reads the upstream names: vocab_size,
    # max_position_embeddings, d_model, encoder_ffn_dim, encoder_layers,
    # encoder_attention_heads, decoder_ffn_dim, decoder_layers,
    # decoder_attention_heads, dropout, attention_dropout, activation_dropout,
    # activation_function, init_std, encoder_layerdrop, decoder_layerdrop,
    # classifier_dropout, use_cache, scale_embedding, plus token-id kwargs.
    def __init__( self , A_=5_0265 , A_=1024 , A_=12 , A_=4096 , A_=16 , A_=12 , A_=4096 , A_=16 , A_=0.0 , A_=0.0 , A_="gelu" , A_=1024 , A_=0.1 , A_=0.0 , A_=0.0 , A_=0.02 , A_=0.0 , A_=False , A_=True , A_=3 , A_=1 , A_=0 , A_=2 , A_=True , A_=2 , A_=2 , **A_ , ):
        """Store BART hyper-parameters and forward token ids to the base class."""
        # NOTE(review): each `UpperCamelCase : T = x` line below was originally
        # `self.x = x`; the mangler destroyed the attribute targets.
        UpperCamelCase : int = vocab_size
        UpperCamelCase : List[Any] = max_position_embeddings
        UpperCamelCase : Any = d_model
        UpperCamelCase : Optional[Any] = encoder_ffn_dim
        UpperCamelCase : List[Any] = encoder_layers
        UpperCamelCase : int = encoder_attention_heads
        UpperCamelCase : Optional[int] = decoder_ffn_dim
        UpperCamelCase : List[str] = decoder_layers
        UpperCamelCase : Optional[int] = decoder_attention_heads
        UpperCamelCase : int = dropout
        UpperCamelCase : int = attention_dropout
        UpperCamelCase : Tuple = activation_dropout
        UpperCamelCase : Tuple = activation_function
        UpperCamelCase : int = init_std
        UpperCamelCase : List[Any] = encoder_layerdrop
        UpperCamelCase : List[str] = decoder_layerdrop
        UpperCamelCase : Dict = classifier_dropout
        UpperCamelCase : Optional[int] = use_cache
        UpperCamelCase : List[Any] = encoder_layers
        UpperCamelCase : int = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=A_ ,
            pad_token_id=A_ ,
            bos_token_id=A_ ,
            eos_token_id=A_ ,
            is_encoder_decoder=A_ ,
            decoder_start_token_id=A_ ,
            forced_eos_token_id=A_ ,
            **A_ ,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , A_ ):
            UpperCamelCase : int = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                "The config can simply be saved and uploaded again to be fixed."
            )


class A__ ( __snake_case ):  # upstream: `class BartOnnxConfig(OnnxSeqaSeqConfigWithPast)`
    """ONNX export configuration for BART (mangled copy of `BartOnnxConfig`)."""

    # NOTE(review): all methods below share the mangled name `__UpperCamelCase`,
    # so at runtime only the last definition would survive; upstream names are
    # noted on each method.
    @property
    def __UpperCamelCase( self ):  # upstream: `inputs`
        """Describe the dynamic-axis layout of the exported model's inputs per task."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[int] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                # With cached keys/values the decoder consumes a single new token.
                UpperCamelCase : List[str] = {0: "batch"}
                UpperCamelCase : Dict = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                UpperCamelCase : Dict = {0: "batch", 1: "decoder_sequence"}
                UpperCamelCase : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(A_ , direction="inputs" )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            UpperCamelCase : Any = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                UpperCamelCase , UpperCamelCase : Optional[int] = self.num_layers
                for i in range(A_ ):
                    UpperCamelCase : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
                    UpperCamelCase : Union[str, Any] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            UpperCamelCase : Optional[Any] = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )
        return common_inputs

    @property
    def __UpperCamelCase( self ):  # upstream: `outputs`
        """Describe dynamic axes of outputs; appends present-key/value axes for causal-lm."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Tuple = super().outputs
        else:
            UpperCamelCase : Dict = super(A_ , self ).outputs
            if self.use_past:
                UpperCamelCase , UpperCamelCase : int = self.num_layers
                for i in range(A_ ):
                    UpperCamelCase : int = {0: "batch", 2: "past_sequence + sequence"}
                    UpperCamelCase : Tuple = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        # upstream: `_generate_dummy_inputs_for_default_and_seq2seq_lm`
        """Build encoder + decoder dummy tensors (and zeroed past_key_values if enabled)."""
        UpperCamelCase : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_ )

        # Generate decoder inputs
        UpperCamelCase : List[Any] = seq_length if not self.use_past else 1
        UpperCamelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_ )
        UpperCamelCase : Optional[int] = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        UpperCamelCase : List[Any] = dict(**A_ , **A_ )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                UpperCamelCase , UpperCamelCase : Optional[Any] = common_inputs["input_ids"].shape
                UpperCamelCase : List[Any] = common_inputs["decoder_input_ids"].shape[1]
                UpperCamelCase , UpperCamelCase : List[str] = self.num_attention_heads
                # Per-layer key/value shape for the encoder side.
                UpperCamelCase : int = (
                    batch,
                    num_encoder_attention_heads,
                    encoder_seq_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )
                UpperCamelCase : List[Any] = decoder_seq_length + 3
                UpperCamelCase : str = (
                    batch,
                    num_decoder_attention_heads,
                    decoder_past_length,
                    self._config.hidden_size // num_decoder_attention_heads,
                )

                UpperCamelCase : int = torch.cat(
                    [common_inputs["decoder_attention_mask"], torch.ones(A_ , A_ )] , dim=1 )

                UpperCamelCase : int = []
                # If the number of encoder and decoder layers are present in the model configuration, both are considered
                UpperCamelCase , UpperCamelCase : Union[str, Any] = self.num_layers
                UpperCamelCase : Any = min(A_ , A_ )
                UpperCamelCase : List[str] = max(A_ , A_ ) - min_num_layers
                UpperCamelCase : Dict = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

                for _ in range(A_ ):
                    common_inputs["past_key_values"].append(
                        (
                            torch.zeros(A_ ),
                            torch.zeros(A_ ),
                            torch.zeros(A_ ),
                            torch.zeros(A_ ),
                        )
                    )
                # TODO: test this.
                UpperCamelCase : Optional[Any] = encoder_shape if remaining_side_name == "encoder" else decoder_shape
                for _ in range(A_ , A_ ):
                    common_inputs["past_key_values"].append((torch.zeros(A_ ), torch.zeros(A_ )) )
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        # upstream: `_generate_dummy_inputs_for_causal_lm`
        """Build decoder-only dummy tensors with optional zeroed past_key_values."""
        UpperCamelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            A_ , A_ , A_ , A_ , A_ )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                UpperCamelCase , UpperCamelCase : Union[str, Any] = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                UpperCamelCase : Optional[Any] = seqlen + 2
                UpperCamelCase , UpperCamelCase : List[Any] = self.num_layers
                UpperCamelCase , UpperCamelCase : Optional[int] = self.num_attention_heads
                UpperCamelCase : str = (
                    batch,
                    num_encoder_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // num_encoder_attention_heads,
                )

                UpperCamelCase : Optional[Any] = common_inputs["attention_mask"].dtype
                UpperCamelCase : int = torch.cat(
                    [common_inputs["attention_mask"], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
                UpperCamelCase : Optional[Any] = [
                    (torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(A_ )
                ]
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        # upstream: `_generate_dummy_inputs_for_sequence_classification_and_question_answering`
        """Tokenize a dummy batch of unk-token strings sized by effective batch/sequence."""
        # Copied from OnnxConfig.generate_dummy_inputs
        UpperCamelCase : Optional[Any] = compute_effective_axis_dimension(
            A_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        UpperCamelCase : Union[str, Any] = tokenizer.num_special_tokens_to_add(A_ )
        UpperCamelCase : int = compute_effective_axis_dimension(
            A_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=A_ )

        # Generate dummy inputs according to compute batch and sequence
        UpperCamelCase : int = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        UpperCamelCase : Dict = dict(tokenizer(A_ , return_tensors=A_ ) )
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ):
        # upstream: `generate_dummy_inputs` -- dispatches on `self.task`
        """Dispatch dummy-input generation to the task-specific helper."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[int] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
        elif self.task == "causal-lm":
            UpperCamelCase : List[str] = self._generate_dummy_inputs_for_causal_lm(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
        else:
            UpperCamelCase : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
        return common_inputs

    def __UpperCamelCase( self , A_ , A_ , A_ , A_ ):
        # upstream: `_flatten_past_key_values_`
        """Flatten past_key_values; seq2seq tasks use the base-class layout."""
        if self.task in ["default", "seq2seq-lm"]:
            UpperCamelCase : Optional[Any] = super()._flatten_past_key_values_(A_ , A_ , A_ , A_ )
        else:
            UpperCamelCase : Optional[Any] = super(A_ , self )._flatten_past_key_values_(
                A_ , A_ , A_ , A_ )
52
0
"""Lazy import structure for the TimeSformer video model.

Fixes the mangled original, which rebound a single name to both the import
dict and the torch-only list (losing the dict) and then referenced an
undefined `_import_structure`, raising NameError at import time.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule name -> public symbols; consumed by _LazyModule so the heavy
# modeling code is only imported on first attribute access.
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # The modeling module requires torch; register it only when torch is present.
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
150
"""Number-theory helpers (prime tests, factorization, gcd/lcm, divisors, fib).

Fixes the mangled original in which every function was named ``A_`` (each
definition shadowed the previous one while call sites used the real names),
two-argument functions repeated a parameter name (SyntaxError), true division
in ``prime_factorization`` drifted to float, and ``goldbach`` never tried
equal prime pairs.  Validation stays assert-based, matching the file's style.
"""
from math import sqrt


def is_prime(number: int) -> bool:
    """Return True iff ``number`` is prime (trial division up to sqrt(number))."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    status = True

    # 0 and 1 are none primes.
    if number <= 1:
        status = False

    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' divisible by 'divisor' then it is composite.
        if number % divisor == 0:
            status = False
            break

    assert isinstance(status, bool), "'status' must been from type bool"
    return status


def sieve_er(n: int) -> list[int]:
    """Sieve of Eratosthenes: all primes from 2 up to ``n`` inclusive."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    begin_list = list(range(2, n + 1))
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0  # mark composite

    ans = [x for x in begin_list if x != 0]
    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def get_prime_numbers(n: int) -> list[int]:
    """All primes between 2 and ``n`` inclusive, via per-number primality tests."""
    assert isinstance(n, int) and (n > 2), "'N' must been an int and > 2"

    ans = [number for number in range(2, n + 1) if is_prime(number)]

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def prime_factorization(number: int) -> list[int]:
    """Prime factorization of ``number`` as an ascending list with multiplicity."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and >= 0"

    ans = []
    factor = 2
    quotient = number

    if number in (0, 1):
        ans.append(number)
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and quotient % factor == 0:
                ans.append(factor)
                # Floor division keeps 'quotient' an int; '/=' would drift to
                # float and break is_prime's isinstance(int) assertion.
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)

    assert isinstance(ans, list), "'ans' must been from type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    """Largest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = max(prime_factorization(number))

    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    """Smallest prime factor of ``number``."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' bust been an int and >= 0"

    ans = min(prime_factorization(number))

    assert isinstance(ans, int), "'ans' must been from type int"
    return ans


def is_even(number: int) -> bool:
    """True iff ``number`` is even."""
    assert isinstance(number, int), "'number' must been an int"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    """True iff ``number`` is odd."""
    assert isinstance(number, int), "'number' must been an int"
    return number % 2 != 0


def goldbach(number: int) -> list[int]:
    """Return two primes ``[p, q]`` with ``p + q == number`` (Goldbach pair)."""
    assert (
        isinstance(number, int) and (number > 2) and is_even(number)
    ), "'number' must been an int, even and > 2"

    ans = []

    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)

    i = 0
    loop = True
    while i < len_pn and loop:
        # Start the inner scan at j == i so doubled primes are found too
        # (e.g. 6 = 3 + 3); the original started at i + 1 and failed there.
        j = i
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1

    assert (
        isinstance(ans, list)
        and (len(ans) == 2)
        and (ans[0] + ans[1] == number)
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contains two primes. And sum of elements must been eq 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must been positive integer."

    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest

    assert isinstance(number1, int) and (
        number1 >= 0
    ), "'number' must been from type int and positive"
    return number1


def kg_v(number1: int, number2: int) -> int:
    """Least common multiple (German: kgV) of two positive integers."""
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must been positive integer."

    ans = 1  # actual answer that will be return.

    # for kgV (x, 1)
    if number1 > 1 and number2 > 1:
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)

    done = []  # primes already multiplied into 'ans'

    # take each shared prime at its maximum multiplicity
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)

    # primes only occurring in the second factorization
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)

    assert isinstance(ans, int) and (
        ans >= 0
    ), "'ans' must been from type int and positive"
    return ans


def get_prime(n: int) -> int:
    """Return the n-th prime, counting from ``get_prime(0) == 2``."""
    assert isinstance(n, int) and (n >= 0), "'number' must been a positive int"

    index = 0
    ans = 2  # this variable holds the answer

    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans not prime then run to the next prime number.
        while not is_prime(ans):
            ans += 1

    assert isinstance(ans, int) and is_prime(
        ans
    ), "'ans' must been a prime number and from type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list[int]:
    """All primes strictly between the two given primes (exclusive bounds)."""
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"

    number = p_number_1 + 1  # jump to the next number
    # fetch the first prime above the lower bound.
    while not is_prime(number):
        number += 1

    ans = []
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1

    # NOTE: empty result (adjacent primes) would make ans[0] raise IndexError,
    # matching the original's precondition-style behaviour.
    assert (
        isinstance(ans, list)
        and ans[0] != p_number_1
        and ans[len(ans) - 1] != p_number_2
    ), "'ans' must been a list without the arguments"
    return ans


def get_divisors(n: int) -> list[int]:
    """All positive divisors of ``n``, ascending and including 1 and n."""
    assert isinstance(n, int) and (n >= 1), "'n' must been int and >= 1"

    ans = [divisor for divisor in range(1, n + 1) if n % divisor == 0]

    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function getDivisiors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    """True iff ``number`` equals the sum of its proper divisors."""
    assert isinstance(number, int) and (
        number > 1
    ), "'number' must been an int and >= 1"

    divisors = get_divisors(number)

    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function getDivisiors(...)"

    # summed all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple[int, int]:
    """Reduce numerator/denominator by their greatest common divisor."""
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must been from type int and 'denominator' != 0"

    gcd_of_fraction = gcd(abs(numerator), abs(denominator))

    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...,...)"

    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    """Iterative factorial of ``n`` (``factorial(0) == 1``)."""
    assert isinstance(n, int) and (n >= 0), "'n' must been a int and >= 0"

    ans = 1
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    """Fibonacci-style sequence with fib(0) == fib(1) == 1, fib(2) == 2, ..."""
    assert isinstance(n, int) and (n >= 0), "'n' must been an int and >= 0"

    tmp = 0
    fib1 = 1
    ans = 1  # this will be return
    for _ in range(n - 1):
        tmp = ans
        ans += fib1
        fib1 = tmp
    return ans
52
0
'''Tests for the Audio Spectrogram Transformer (AST) feature extractor.

NOTE(review): identifiers in this fragment were mechanically mangled
(duplicate parameter names, two classes sharing one name, `A_` call-site
placeholders); inline notes below map each piece to its upstream meaning.
'''
import itertools
import random
import unittest

import numpy as np

from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


# Module-level RNG (upstream name: `global_rng`).
lowercase__ : str = random.Random()

if is_torch_available():
    import torch


# NOTE(review): all four parameters share the name "lowercase" -- a
# SyntaxError.  The body reads `shape`, `scale`, `rng`, `global_rng`, so the
# upstream signature was presumably floats_list(shape, scale=1.0, rng=None,
# name=None) -- confirm against transformers' test utilities.
def a__ ( lowercase : Optional[int], lowercase : List[str]=1.0, lowercase : Tuple=None, lowercase : Optional[int]=None ) -> Optional[Any]:
    """Build a nested list of random floats with the given 2-D shape."""
    if rng is None:
        _UpperCamelCase = global_rng
    _UpperCamelCase = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )

    return values


class __lowerCAmelCase ( unittest.TestCase ):  # upstream: `ASTFeatureExtractionTester`
    """Holds the feature-extractor hyper-parameters and builds dummy speech inputs."""

    # NOTE(review): every `_UpperCamelCase = x` below was originally `self.x = x`.
    def __init__( self : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int=7 , lowerCAmelCase__ : Tuple=400 , lowerCAmelCase__ : Tuple=2000 , lowerCAmelCase__ : Optional[int]=1 , lowerCAmelCase__ : List[Any]=0.0 , lowerCAmelCase__ : Dict=16000 , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : Tuple=True , ) -> Dict:
        """Record batch/sequence sizing and extractor settings for the tests."""
        _UpperCamelCase = parent
        _UpperCamelCase = batch_size
        _UpperCamelCase = min_seq_length
        _UpperCamelCase = max_seq_length
        # step between the per-example lengths so the batch spans min..max
        _UpperCamelCase = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        _UpperCamelCase = feature_size
        _UpperCamelCase = padding_value
        _UpperCamelCase = sampling_rate
        _UpperCamelCase = return_attention_mask
        _UpperCamelCase = do_normalize

    def snake_case__ ( self : Any ) -> Union[str, Any]:  # upstream: `prepare_feat_extract_dict`
        """Return the kwargs used to construct the feature extractor under test."""
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def snake_case__ ( self : List[Any] , lowerCAmelCase__ : str=False , lowerCAmelCase__ : Optional[int]=False ) -> Any:
        # upstream: `prepare_inputs_for_common(equal_length=False, numpify=False)`
        """Build a batch of float speech inputs, optionally equal-length / numpy."""
        def _flatten(lowerCAmelCase__ : Tuple ):
            return list(itertools.chain(*A_ ) )

        if equal_length:
            _UpperCamelCase = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            _UpperCamelCase = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]

        if numpify:
            _UpperCamelCase = [np.asarray(A_ ) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class __lowerCAmelCase ( __snake_case , unittest.TestCase ):  # upstream: `ASTFeatureExtractionTest`
    """Functional tests for ASTFeatureExtractor.

    NOTE(review): this reuses the mangled class name above, shadowing the
    tester class; the mixin base was mangled to `__snake_case`
    (upstream: SequenceFeatureExtractionTestMixin).
    """

    # class under test, consumed by the mixin
    _snake_case : Optional[Any] = ASTFeatureExtractor

    # NOTE(review): all methods below share the name `snake_case__`; only the
    # last would survive at runtime.  Upstream names are noted per method.
    def snake_case__ ( self : Union[str, Any] ) -> Optional[int]:  # upstream: `setUp`
        """Create the tester helper used to build inputs."""
        _UpperCamelCase = ASTFeatureExtractionTester(self )

    def snake_case__ ( self : str ) -> Tuple:  # upstream: `test_call`
        """Extractor output should match for list, numpy, batched and 2-D inputs."""
        _UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        _UpperCamelCase = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        _UpperCamelCase = [np.asarray(A_ ) for speech_input in speech_inputs]

        # Test not batched input
        _UpperCamelCase = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
        _UpperCamelCase = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
        self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

        # Test batched
        _UpperCamelCase = feat_extract(A_ , padding=A_ , return_tensors='''np''' ).input_values
        _UpperCamelCase = feat_extract(A_ , padding=A_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
            self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

        # Test 2-D numpy arrays are batched.
        _UpperCamelCase = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        _UpperCamelCase = np.asarray(A_ )
        _UpperCamelCase = feat_extract(A_ , return_tensors='''np''' ).input_values
        _UpperCamelCase = feat_extract(A_ , return_tensors='''np''' ).input_values
        for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
            self.assertTrue(np.allclose(A_ , A_ , atol=1e-3 ) )

    @require_torch
    def snake_case__ ( self : int ) -> Optional[int]:  # upstream: `test_double_precision_pad`
        """Padding float64 python/numpy inputs should downcast to float32 tensors."""
        import torch
        _UpperCamelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        _UpperCamelCase = np.random.rand(100 ).astype(np.floataa )
        _UpperCamelCase = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            _UpperCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_values.dtype == np.floataa )
            _UpperCamelCase = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_values.dtype == torch.floataa )

    def snake_case__ ( self : Dict , lowerCAmelCase__ : Tuple ) -> Optional[int]:  # upstream: `_load_datasamples`
        """Load the first `num_samples` decoded audio arrays from the dummy LibriSpeech set."""
        from datasets import load_dataset

        _UpperCamelCase = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        _UpperCamelCase = ds.sort('''id''' ).select(range(A_ ) )[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def snake_case__ ( self : Tuple ) -> Any:  # upstream: `test_integration`
        """Extracted spectrogram should match pinned reference values on real audio."""
        # fmt: off
        _UpperCamelCase = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
        # fmt: on

        _UpperCamelCase = self._load_datasamples(1 )
        _UpperCamelCase = ASTFeatureExtractor()
        _UpperCamelCase = feature_extractor(A_ , return_tensors='''pt''' ).input_values
        self.assertEquals(input_values.shape , (1, 1024, 128) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , A_ , atol=1e-4 ) )
324
"""Verify every auto-config class documents a valid checkpoint link.

Fixes the mangled original in which both functions were named ``A_`` (the
second shadowed the first, so the internal call to
``get_checkpoint_from_config_class`` raised NameError) and every module-level
constant shared the name ``__lowerCamelCase``, leaving ``CONFIG_MAPPING`` and
``_re_checkpoint`` undefined.
"""
import inspect
import re

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root
# of the repo with the command: python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

# Config classes exempt from the docstring-checkpoint requirement.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "MusicgenConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "TimmBackboneConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
    "LlamaConfig",
}


def get_checkpoint_from_config_class(config_class):
    """Return the first docstring checkpoint whose link matches its name, or None."""
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing config classes whose docstring lacks a valid checkpoint."""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""")


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
52
0
"""Unit tests for knapsack.greedy_knapsack.calc_profit.

Fixes the mangled original: all six test methods shared the single name
``UpperCamelCase`` (so unittest only ever registered the last one) and every
call site used the undefined placeholder ``A_``.
"""
import unittest

from knapsack import greedy_knapsack as kp


class SCREAMING_SNAKE_CASE__(unittest.TestCase):
    """Tests for calc_profit's result and its input-validation messages."""

    def test_sorted(self):
        """calc_profit(profit, weight, max_weight) returns the maximal profit."""
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    # NOTE(review): the two-argument assertRaisesRegex form below only builds a
    # context manager and never invokes kp.calc_profit; to actually exercise
    # the validation these should wrap a calc_profit call — confirm intent.
    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")


if __name__ == "__main__":
    unittest.main()
193
from __future__ import annotations from functools import lru_cache from math import ceil __lowerCamelCase : str = 100 __lowerCamelCase : Any = set(range(3, NUM_PRIMES, 2)) primes.add(2) __lowerCamelCase : int for prime in range(3, ceil(NUM_PRIMES**0.5), 2): if prime not in primes: continue primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime))) @lru_cache(maxsize=100 ) def A_ ( _lowerCAmelCase ) -> set[int]: if number_to_partition < 0: return set() elif number_to_partition == 0: return {1} UpperCamelCase : set[int] = set() UpperCamelCase : int UpperCamelCase : int for prime in primes: if prime > number_to_partition: continue for sub in partition(number_to_partition - prime ): ret.add(sub * prime ) return ret def A_ ( _lowerCAmelCase = 5000 ) -> int | None: for number_to_partition in range(1 , _lowerCAmelCase ): if len(partition(_lowerCAmelCase ) ) > number_unique_partitions: return number_to_partition return None if __name__ == "__main__": print(f"""{solution() = }""")
52
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available A : Union[str, Any] = { """configuration_tapas""": ["""TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TapasConfig"""], """tokenization_tapas""": ["""TapasTokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Optional[Any] = [ """TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""", """TapasForMaskedLM""", """TapasForQuestionAnswering""", """TapasForSequenceClassification""", """TapasModel""", """TapasPreTrainedModel""", """load_tf_weights_in_tapas""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A : Union[str, Any] = [ """TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFTapasForMaskedLM""", """TFTapasForQuestionAnswering""", """TFTapasForSequenceClassification""", """TFTapasModel""", """TFTapasPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys A : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
305
def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : Optional[int] = int(_lowerCAmelCase ) if decimal in (0, 1): # Exit cases for the recursion return str(_lowerCAmelCase ) UpperCamelCase , UpperCamelCase : Dict = divmod(_lowerCAmelCase , 2 ) return binary_recursive(_lowerCAmelCase ) + str(_lowerCAmelCase ) def A_ ( _lowerCAmelCase ) -> str: UpperCamelCase : Tuple = str(_lowerCAmelCase ).strip() if not number: raise ValueError("No input value was provided" ) UpperCamelCase : Optional[int] = "-" if number.startswith("-" ) else "" UpperCamelCase : Any = number.lstrip("-" ) if not number.isnumeric(): raise ValueError("Input value is not an integer" ) return F"""{negative}0b{binary_recursive(int(_lowerCAmelCase ) )}""" if __name__ == "__main__": from doctest import testmod testmod()
52
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) A__ : List[Any] = { """configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A__ : Tuple = [ """MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MegaForCausalLM""", """MegaForMaskedLM""", """MegaForMultipleChoice""", """MegaForQuestionAnswering""", """MegaForSequenceClassification""", """MegaForTokenClassification""", """MegaModel""", """MegaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mega import ( MEGA_PRETRAINED_MODEL_ARCHIVE_LIST, MegaForCausalLM, MegaForMaskedLM, MegaForMultipleChoice, MegaForQuestionAnswering, MegaForSequenceClassification, MegaForTokenClassification, MegaModel, MegaPreTrainedModel, ) else: import sys A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
207
import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class A__ : def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=24 , A_=2 , A_=6 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=None , A_=1000 , ): '''simple docstring''' UpperCamelCase : Union[str, Any] = parent UpperCamelCase : List[Any] = batch_size UpperCamelCase : Dict = seq_length UpperCamelCase : Tuple = is_training UpperCamelCase : Union[str, Any] = use_input_mask UpperCamelCase : Tuple = use_token_type_ids UpperCamelCase : Optional[Any] = use_labels UpperCamelCase : str = vocab_size UpperCamelCase : Optional[int] = hidden_size UpperCamelCase : Any = num_hidden_layers UpperCamelCase : Optional[Any] = num_attention_heads UpperCamelCase : Optional[Any] = intermediate_size UpperCamelCase : Optional[Any] = hidden_act UpperCamelCase : Union[str, Any] = hidden_dropout_prob UpperCamelCase : Union[str, Any] = attention_probs_dropout_prob UpperCamelCase : List[Any] = max_position_embeddings UpperCamelCase : str = type_vocab_size UpperCamelCase : Optional[int] = type_sequence_label_size UpperCamelCase : Dict = initializer_range UpperCamelCase : int = num_labels UpperCamelCase : Optional[int] = scope UpperCamelCase : int = range_bbox def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) 
UpperCamelCase : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: UpperCamelCase : Union[str, Any] = bbox[i, j, 3] UpperCamelCase : int = bbox[i, j, 1] UpperCamelCase : int = t if bbox[i, j, 2] < bbox[i, j, 0]: UpperCamelCase : List[str] = bbox[i, j, 2] UpperCamelCase : Optional[int] = bbox[i, j, 0] UpperCamelCase : Optional[Any] = t UpperCamelCase : Dict = None if self.use_input_mask: UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) UpperCamelCase : str = None if self.use_token_type_ids: UpperCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase : Dict = None UpperCamelCase : int = None if self.use_labels: UpperCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCamelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCamelCase : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def __UpperCamelCase( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = LiltModel(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : str = model(A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ ) UpperCamelCase : 
Optional[int] = model(A_ , bbox=A_ , token_type_ids=A_ ) UpperCamelCase : Any = model(A_ , bbox=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Any = self.num_labels UpperCamelCase : Dict = LiltForTokenClassification(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : Dict = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' UpperCamelCase : Dict = LiltForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() UpperCamelCase : List[str] = model( A_ , bbox=A_ , attention_mask=A_ , token_type_ids=A_ , start_positions=A_ , end_positions=A_ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Any = self.prepare_config_and_inputs() ( ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ( UpperCamelCase ) , ) : Tuple = config_and_inputs UpperCamelCase : Tuple = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class A__ ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): _UpperCAmelCase :Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if is_torch_available() else () ) _UpperCAmelCase :Optional[Any] = ( { 'feature-extraction': LiltModel, 
'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) _UpperCAmelCase :Dict = False _UpperCAmelCase :Union[str, Any] = False def __UpperCamelCase( self , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' return True def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Optional[int] = LiltModelTester(self ) UpperCamelCase : Optional[int] = ConfigTester(self , config_class=A_ , hidden_size=37 ) def __UpperCamelCase( self ): '''simple docstring''' self.config_tester.run_common_tests() def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCamelCase : Union[str, Any] = type self.model_tester.create_and_check_model(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) @slow def __UpperCamelCase( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCamelCase : Dict = LiltModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @slow class A__ ( unittest.TestCase ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : int = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(A_ ) UpperCamelCase : Tuple = torch.tensor([[1, 2]] , device=A_ ) UpperCamelCase : List[str] = 
torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A_ ) # forward pass with torch.no_grad(): UpperCamelCase : Optional[int] = model(input_ids=A_ , bbox=A_ ) UpperCamelCase : List[str] = torch.Size([1, 2, 768] ) UpperCamelCase : Any = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=A_ , ) self.assertTrue(outputs.last_hidden_state.shape , A_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A_ , atol=1e-3 ) )
52
0
"""simple docstring""" import warnings warnings.warn( """memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """ """`from accelerate import find_executable_batch_size` to avoid this warning.""", FutureWarning, )
289
import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import numpy as np import pytest from datasets.arrow_dataset import Dataset from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex from .utils import require_elasticsearch, require_faiss __lowerCamelCase : Union[str, Any] = pytest.mark.integration @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Union[str, Any] = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(A_ ) for x in np.arange(30 ).tolist()]} ) return dset def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() UpperCamelCase : List[Any] = dset.map( lambda A_ , A_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=A_ , keep_in_memory=A_ ) UpperCamelCase : List[str] = dset.add_faiss_index("vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT ) UpperCamelCase , UpperCamelCase : Tuple = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) dset.drop_index("vecs" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , ) UpperCamelCase , UpperCamelCase : int = dset.get_nearest_examples("vecs" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" , metric_type=faiss.METRIC_INNER_PRODUCT , ) # 
Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: dset.save_faiss_index("vecs" , tmp_file.name ) dset.load_faiss_index("vecs2" , tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("vecs2" , np.ones(5 , dtype=np.floataa ) ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) def __UpperCamelCase( self ): '''simple docstring''' UpperCamelCase : Dataset = self._create_dummy_dataset() dset.add_faiss_index_from_external_arrays( external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="vecs" ) dset.drop_index("vecs" ) self.assertRaises(A_ , partial(dset.get_nearest_examples , "vecs2" , np.ones(5 , dtype=np.floataa ) ) ) def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch UpperCamelCase : Dataset = self._create_dummy_dataset() with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = {"acknowledged": True} mocked_bulk.return_value([(True, None)] * 30 ) UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 29}]}} UpperCamelCase : Optional[Any] = Elasticsearch() dset.add_elasticsearch_index("filename" , es_client=A_ ) UpperCamelCase , UpperCamelCase : List[str] = dset.get_nearest_examples("filename" , "my_name-train_29" ) self.assertEqual(examples["filename"][0] , "my_name-train_29" ) @require_faiss class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' import faiss 
UpperCamelCase : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) # add vectors index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsNotNone(index.faiss_index ) self.assertEqual(index.faiss_index.ntotal , 5 ) index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) ) self.assertEqual(index.faiss_index.ntotal , 10 ) # single query UpperCamelCase : Any = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[Any] = 1 UpperCamelCase , UpperCamelCase : Optional[Any] = index.search(A_ ) self.assertRaises(A_ , index.search , query.reshape(-1 , 1 ) ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) # batched queries UpperCamelCase : Optional[int] = np.eye(5 , dtype=np.floataa )[::-1] UpperCamelCase , UpperCamelCase : Tuple = index.search_batch(A_ ) self.assertRaises(A_ , index.search_batch , queries[0] ) UpperCamelCase : Optional[int] = [scores[0] for scores in total_scores] UpperCamelCase : Tuple = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([4, 3, 2, 1, 0] , A_ ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) UpperCamelCase : List[str] = FaissIndex(string_factory="LSH" ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexLSH ) with self.assertRaises(A_ ): UpperCamelCase : List[str] = FaissIndex(string_factory="Flat" , custom_index=faiss.IndexFlat(5 ) ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : Dict = faiss.IndexFlat(5 ) UpperCamelCase : Union[str, Any] = FaissIndex(custom_index=A_ ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) self.assertIsInstance(index.faiss_index , faiss.IndexFlat ) def __UpperCamelCase( self ): '''simple docstring''' import faiss UpperCamelCase : str = 
FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue. # see https://bugs.python.org/issue14243 and # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515 with tempfile.NamedTemporaryFile(delete=A_ ) as tmp_file: index.save(tmp_file.name ) UpperCamelCase : int = FaissIndex.load(tmp_file.name ) os.unlink(tmp_file.name ) UpperCamelCase : str = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : int = 1 UpperCamelCase , UpperCamelCase : Dict = index.search(A_ ) self.assertGreater(scores[0] , 0 ) self.assertEqual(indices[0] , 1 ) @require_faiss def A_ ( _lowerCAmelCase ) -> Optional[int]: import faiss UpperCamelCase : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT ) index.add_vectors(np.eye(5 , dtype=np.floataa ) ) UpperCamelCase : List[Any] = "index.faiss" UpperCamelCase : List[str] = F"""mock://{index_name}""" index.save(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = FaissIndex.load(_lowerCAmelCase , storage_options=mockfs.storage_options ) UpperCamelCase : List[str] = np.zeros(5 , dtype=np.floataa ) UpperCamelCase : Optional[int] = 1 UpperCamelCase , UpperCamelCase : List[str] = index.search(_lowerCAmelCase ) assert scores[0] > 0 assert indices[0] == 1 @require_elasticsearch class A__ ( __snake_case ): def __UpperCamelCase( self ): '''simple docstring''' from elasticsearch import Elasticsearch with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch( "elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk: UpperCamelCase : List[str] = Elasticsearch() UpperCamelCase : Union[str, Any] = {"acknowledged": True} UpperCamelCase : 
Union[str, Any] = ElasticSearchIndex(es_client=A_ ) mocked_bulk.return_value([(True, None)] * 3 ) index.add_documents(["foo", "bar", "foobar"] ) # single query UpperCamelCase : str = "foo" UpperCamelCase : Dict = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : Tuple = index.search(A_ ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # single query with timeout UpperCamelCase : Dict = "foo" UpperCamelCase : Optional[Any] = {"hits": {"hits": [{"_score": 1, "_id": 0}]}} UpperCamelCase , UpperCamelCase : str = index.search(A_ , request_timeout=30 ) self.assertEqual(scores[0] , 1 ) self.assertEqual(indices[0] , 0 ) # batched queries UpperCamelCase : Dict = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Optional[int] = index.search_batch(A_ ) UpperCamelCase : str = [scores[0] for scores in total_scores] UpperCamelCase : Optional[Any] = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ ) # batched queries with timeout UpperCamelCase : int = ["foo", "bar", "foobar"] UpperCamelCase : List[Any] = {"hits": {"hits": [{"_score": 1, "_id": 1}]}} UpperCamelCase , UpperCamelCase : Union[str, Any] = index.search_batch(A_ , request_timeout=30 ) UpperCamelCase : Union[str, Any] = [scores[0] for scores in total_scores] UpperCamelCase : Dict = [indices[0] for indices in total_indices] self.assertGreater(np.min(A_ ) , 0 ) self.assertListEqual([1, 1, 1] , A_ )
52
0
'''simple docstring''' import unittest from transformers import LiltConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, ) from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST class _a : '''simple docstring''' def __init__( self, A, A=13, A=7, A=True, A=True, A=True, A=True, A=99, A=24, A=2, A=6, A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.02, A=3, A=None, A=1_000, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Union[str, Any] = parent SCREAMING_SNAKE_CASE : List[Any] = batch_size SCREAMING_SNAKE_CASE : Dict = seq_length SCREAMING_SNAKE_CASE : Tuple = is_training SCREAMING_SNAKE_CASE : Union[str, Any] = use_input_mask SCREAMING_SNAKE_CASE : Tuple = use_token_type_ids SCREAMING_SNAKE_CASE : Optional[Any] = use_labels SCREAMING_SNAKE_CASE : str = vocab_size SCREAMING_SNAKE_CASE : Optional[int] = hidden_size SCREAMING_SNAKE_CASE : Any = num_hidden_layers SCREAMING_SNAKE_CASE : Optional[Any] = num_attention_heads SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size SCREAMING_SNAKE_CASE : Optional[Any] = hidden_act SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_dropout_prob SCREAMING_SNAKE_CASE : Union[str, Any] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE : str = type_vocab_size SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size SCREAMING_SNAKE_CASE : Dict = initializer_range SCREAMING_SNAKE_CASE : int = num_labels SCREAMING_SNAKE_CASE : Optional[int] = scope SCREAMING_SNAKE_CASE : int = range_bbox def 
UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) SCREAMING_SNAKE_CASE : Any = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox ) # Ensure that bbox is legal for i in range(bbox.shape[0] ): for j in range(bbox.shape[1] ): if bbox[i, j, 3] < bbox[i, j, 1]: SCREAMING_SNAKE_CASE : Union[str, Any] = bbox[i, j, 3] SCREAMING_SNAKE_CASE : int = bbox[i, j, 1] SCREAMING_SNAKE_CASE : int = t if bbox[i, j, 2] < bbox[i, j, 0]: SCREAMING_SNAKE_CASE : List[str] = bbox[i, j, 2] SCREAMING_SNAKE_CASE : Optional[int] = bbox[i, j, 0] SCREAMING_SNAKE_CASE : Optional[Any] = t SCREAMING_SNAKE_CASE : Dict = None if self.use_input_mask: SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length], vocab_size=2 ) SCREAMING_SNAKE_CASE : str = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE : str = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size ) SCREAMING_SNAKE_CASE : Dict = None SCREAMING_SNAKE_CASE : int = None if self.use_labels: SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size ) SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) SCREAMING_SNAKE_CASE : List[Any] = self.get_config() return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels def UpperCamelCase_ ( self ): '''simple docstring''' return LiltConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, 
): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = LiltModel(config=A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE : str = model(A_, bbox=A_, attention_mask=A_, token_type_ids=A_ ) SCREAMING_SNAKE_CASE : Optional[int] = model(A_, bbox=A_, token_type_ids=A_ ) SCREAMING_SNAKE_CASE : Any = model(A_, bbox=A_ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.num_labels SCREAMING_SNAKE_CASE : Dict = LiltForTokenClassification(config=A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE : Dict = model( A_, bbox=A_, attention_mask=A_, token_type_ids=A_, labels=A_ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase_ ( self, A, A, A, A, A, A, A, ): '''simple docstring''' SCREAMING_SNAKE_CASE : Dict = LiltForQuestionAnswering(config=A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE : List[str] = model( A_, bbox=A_, attention_mask=A_, token_type_ids=A_, start_positions=A_, end_positions=A_, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs() ( SCREAMING_SNAKE_CASE ) : Tuple = config_and_inputs SCREAMING_SNAKE_CASE : Tuple = { "input_ids": input_ids, "bbox": bbox, "token_type_ids": token_type_ids, "attention_mask": input_mask, } return config, inputs_dict @require_torch class _a ( __snake_case , __snake_case , __snake_case , unittest.TestCase ): '''simple docstring''' A : Union[str, Any] = ( ( LiltModel, LiltForSequenceClassification, LiltForTokenClassification, LiltForQuestionAnswering, ) if 
is_torch_available() else () ) A : Optional[Any] = ( { 'feature-extraction': LiltModel, 'question-answering': LiltForQuestionAnswering, 'text-classification': LiltForSequenceClassification, 'token-classification': LiltForTokenClassification, 'zero-shot': LiltForSequenceClassification, } if is_torch_available() else {} ) A : Dict = False A : Union[str, Any] = False def UpperCamelCase_ ( self, A, A, A, A, A ): '''simple docstring''' return True def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Optional[int] = LiltModelTester(self ) SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self, config_class=A_, hidden_size=37 ) def UpperCamelCase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: SCREAMING_SNAKE_CASE : Union[str, Any] = type self.model_tester.create_and_check_model(*A_ ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A_ ) def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A_ ) @slow def UpperCamelCase_ ( self ): '''simple docstring''' for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE : Dict = LiltModel.from_pretrained(A_ ) self.assertIsNotNone(A_ ) @require_torch @slow class _a ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE : int = 
LiltModel.from_pretrained('SCUT-DLVCLab/lilt-roberta-en-base' ).to(A_ ) SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[1, 2]], device=A_ ) SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=A_ ) # forward pass with torch.no_grad(): SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=A_, bbox=A_ ) SCREAMING_SNAKE_CASE : List[str] = torch.Size([1, 2, 768] ) SCREAMING_SNAKE_CASE : Any = torch.tensor( [[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]], device=A_, ) self.assertTrue(outputs.last_hidden_state.shape, A_ ) self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], A_, atol=1E-3 ) )
251
def A_ ( _lowerCAmelCase = 50 ) -> int: UpperCamelCase : List[Any] = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(f"""{solution() = }""")
52
0
"""Second-order IIR (biquad) filter factories.

Each function computes normalized biquad coefficients following the
Robert Bristow-Johnson Audio-EQ Cookbook and returns a configured
2nd-order ``IIRFilter``.
"""
from math import cos, sin, sqrt, tau

from audio_filters.iir_filter import IIRFilter


def make_lowpass(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a 2nd-order low-pass filter.

    :param frequency: cutoff frequency in Hz
    :param samplerate: sample rate in Hz
    :param q_factor: quality factor (Butterworth response by default)
    :return: an ``IIRFilter`` with low-pass coefficients applied
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos  # b2 == b0 for the low-pass form

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a 2nd-order high-pass filter.

    :param frequency: cutoff frequency in Hz
    :param samplerate: sample rate in Hz
    :param q_factor: quality factor (Butterworth response by default)
    :return: an ``IIRFilter`` with high-pass coefficients applied
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos  # b2 == b0 for the high-pass form

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a 2nd-order band-pass filter (constant skirt gain).

    :param frequency: center frequency in Hz
    :param samplerate: sample rate in Hz
    :param q_factor: quality factor controlling bandwidth
    :return: an ``IIRFilter`` with band-pass coefficients applied
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0  # symmetric numerator gives the band-pass zero placement

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(
    frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)
) -> IIRFilter:
    """Create a 2nd-order all-pass filter (flat magnitude, phase shift only).

    :param frequency: corner frequency in Hz
    :param samplerate: sample rate in Hz
    :param q_factor: quality factor
    :return: an ``IIRFilter`` with all-pass coefficients applied
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    # The all-pass numerator is the denominator reversed.
    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order peaking EQ filter.

    :param frequency: center frequency in Hz
    :param samplerate: sample rate in Hz
    :param gain_db: boost/cut amount in decibels
    :param q_factor: quality factor controlling bandwidth
    :return: an ``IIRFilter`` with peaking-EQ coefficients applied
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)  # amplitude from dB per the cookbook

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order low-shelf filter.

    :param frequency: shelf transition frequency in Hz
    :param samplerate: sample rate in Hz
    :param gain_db: shelf boost/cut in decibels
    :param q_factor: quality factor
    :return: an ``IIRFilter`` with low-shelf coefficients applied
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Cookbook shorthand terms shared by both shelf filters.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aaa)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aaa)
    a0 = ppmc + aaa
    a1 = -2 * pmpc
    a2 = ppmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(
    frequency: int,
    samplerate: int,
    gain_db: float,
    q_factor: float = 1 / sqrt(2),
) -> IIRFilter:
    """Create a 2nd-order high-shelf filter.

    :param frequency: shelf transition frequency in Hz
    :param samplerate: sample rate in Hz
    :param gain_db: shelf boost/cut in decibels
    :param q_factor: quality factor
    :return: an ``IIRFilter`` with high-shelf coefficients applied
    """
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    # Same shorthand terms as make_lowshelf; numerator/denominator roles swap.
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aaa = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aaa)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aaa)
    a0 = pmc + aaa
    a1 = 2 * mpc
    a2 = pmc - aaa

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
265
def remove_duplicates(key: str) -> str:
    """Return *key* with repeated letters removed, keeping first occurrences.

    Spaces are always kept; non-alphabetic characters other than the space
    are dropped.

    >>> remove_duplicates('Hello World!!')
    'Helo Wrd'
    """
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build the plain→cipher letter mapping for a keyword cipher.

    The deduplicated upper-cased key fills the first positions of the
    cipher alphabet; the remaining letters follow in alphabetical order,
    skipping any letter already used by the key.

    :param key: keyword used to seed the cipher alphabet
    :return: mapping from plaintext letter to ciphertext letter
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher *message* (upper-cased) with *cipher_map*.

    Characters without a mapping (digits, punctuation, spaces) pass through.

    >>> encipher('Hello World!!', create_cipher_map('Goodbye!!'))
    'CYJJM VMQJB!!'
    """
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher *message* (upper-cased) using the inverse of *cipher_map*.

    >>> decipher('Cyjjm vmqjb!!', create_cipher_map('Goodbye!!'))
    'HELLO WORLD!!'
    """
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactively encipher or decipher a message with a keyword."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
52
0