Dataset schema (reconstructed from the flattened viewer header):

column                    type     range
code                      string   lengths 82 – 54.1k
code_codestyle            int64    0 – 699
style_context             string   lengths 111 – 35.6k
style_context_codestyle   int64    0 – 699
label                     int64    0 – 1

Rows appear below in column order: code, code_codestyle, style_context, style_context_codestyle, label. The code and style_context fields are Python source stored as single strings, so each blob below is one cell of the table.
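A minimal sketch of loading a dataset with this schema through the Hugging Face `datasets` library. The repository id is a placeholder, since the dump does not name the dataset; everything else uses the documented `load_dataset` API.

```python
from datasets import load_dataset

# "org/code-style-pairs" is a placeholder id -- the dump does not name the dataset.
ds = load_dataset("org/code-style-pairs", split="train")

print(ds.features)  # code, code_codestyle, style_context, style_context_codestyle, label
row = ds[0]
print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])
```

The rows themselves follow, field by field; the first blob is row 1's code.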
"""simple docstring""" import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class __a ( __UpperCAmelCase ): # to overwrite at feature extractactor specific tests lowerCamelCase : Optional[int] =None lowerCamelCase : List[str] =None @property def lowerCamelCase_ ( self ): '''simple docstring''' return self.feat_extract_tester.prepare_feat_extract_dict() def lowerCamelCase_ ( self ): '''simple docstring''' lowerCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(snake_case__ , '''feature_size''' ) ) self.assertTrue(hasattr(snake_case__ , '''sampling_rate''' ) ) self.assertTrue(hasattr(snake_case__ , '''padding_value''' ) ) def lowerCamelCase_ ( self ): '''simple docstring''' lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(snake_case__ ) == len(snake_case__ ) for x, y in zip(snake_case__ , processed_features[input_name] ) ) ) lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case__ ) lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='''np''' ) lowerCAmelCase_ = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCAmelCase_ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def lowerCamelCase_ ( self ): '''simple docstring''' lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case__ ) lowerCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' ) lowerCAmelCase_ = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCAmelCase_ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def lowerCamelCase_ ( self ): '''simple docstring''' lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=snake_case__ ) lowerCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' ) lowerCAmelCase_ = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowerCAmelCase_ = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def lowerCamelCase_ ( self , UpperCAmelCase=False ): '''simple docstring''' def _inputs_have_equal_length(UpperCAmelCase ): lowerCAmelCase_ = len(input[0] ) for input_slice in input[1:]: if len(snake_case__ ) != length: return False return True def _inputs_are_equal(UpperCAmelCase , UpperCAmelCase ): if len(snake_case__ ) != len(snake_case__ ): return False for input_slice_a, input_slice_a in zip(snake_case__ , snake_case__ ): if not np.allclose(np.asarray(snake_case__ ) , 
np.asarray(snake_case__ ) , atol=1E-3 ): return False return True lowerCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case__ ) lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase_ = self.feat_extract_tester.seq_length_diff lowerCAmelCase_ = self.feat_extract_tester.max_seq_length + pad_diff lowerCAmelCase_ = self.feat_extract_tester.min_seq_length lowerCAmelCase_ = self.feat_extract_tester.batch_size lowerCAmelCase_ = self.feat_extract_tester.feature_size # test padding for List[int] + numpy lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding=snake_case__ ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''longest''' ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[-1] ) ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''longest''' , return_tensors='''np''' ) lowerCAmelCase_ = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(snake_case__ ): feat_extract.pad(snake_case__ , padding='''max_length''' )[input_name] lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=snake_case__ , return_tensors='''np''' ) lowerCAmelCase_ = input_a[input_name] self.assertFalse(_inputs_have_equal_length(snake_case__ ) ) self.assertTrue(_inputs_have_equal_length(snake_case__ ) ) self.assertTrue(_inputs_have_equal_length(snake_case__ ) ) self.assertTrue(_inputs_are_equal(snake_case__ , snake_case__ ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy lowerCAmelCase_ = feat_extract.pad(snake_case__ , pad_to_multiple_of=10 ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''longest''' , pad_to_multiple_of=10 ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=snake_case__ ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , pad_to_multiple_of=10 , max_length=snake_case__ , return_tensors='''np''' , ) lowerCAmelCase_ = input_a[input_name] self.assertTrue(all(len(snake_case__ ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(snake_case__ , snake_case__ ) ) lowerCAmelCase_ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(snake_case__ ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct lowerCAmelCase_ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) 
self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def lowerCamelCase_ ( self , UpperCAmelCase=False ): '''simple docstring''' def _inputs_have_equal_length(UpperCAmelCase ): lowerCAmelCase_ = len(input[0] ) for input_slice in input[1:]: if len(snake_case__ ) != length: return False return True def _inputs_are_equal(UpperCAmelCase , UpperCAmelCase ): if len(snake_case__ ) != len(snake_case__ ): return False for input_slice_a, input_slice_a in zip(snake_case__ , snake_case__ ): if not np.allclose(np.asarray(snake_case__ ) , np.asarray(snake_case__ ) , atol=1E-3 ): return False return True lowerCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common(numpify=snake_case__ ) lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} ) # truncate to smallest lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=snake_case__ ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) ) lowerCAmelCase_ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(snake_case__ ) ) self.assertFalse(_inputs_have_equal_length(snake_case__ ) ) # truncate to smallest with np lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=snake_case__ , ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' ) lowerCAmelCase_ = input_a[input_name] self.assertTrue(_inputs_have_equal_length(snake_case__ ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(snake_case__ ) ) # truncate to middle lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=snake_case__ , return_tensors='''np''' , ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=snake_case__ ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' ) lowerCAmelCase_ = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(snake_case__ ) ) self.assertTrue(_inputs_have_equal_length(snake_case__ ) ) self.assertTrue(_inputs_are_equal(snake_case__ , snake_case__ ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return 
list self.assertFalse(_inputs_have_equal_length(snake_case__ ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case__ ): feat_extract.pad(snake_case__ , truncation=snake_case__ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case__ ): feat_extract.pad(snake_case__ , padding='''longest''' , truncation=snake_case__ )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(snake_case__ ): feat_extract.pad(snake_case__ , padding='''longest''' , truncation=snake_case__ )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(snake_case__ ): feat_extract.pad(snake_case__ , padding='''max_length''' , truncation=snake_case__ )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy lowerCAmelCase_ = 12 lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=snake_case__ , truncation=snake_case__ , ) lowerCAmelCase_ = input_a[input_name] lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=snake_case__ , ) lowerCAmelCase_ = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of lowerCAmelCase_ = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: lowerCAmelCase_ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(snake_case__ ) ) self.assertFalse(_inputs_have_equal_length(snake_case__ ) ) def lowerCamelCase_ ( self ): '''simple docstring''' self._check_padding(numpify=snake_case__ ) def lowerCamelCase_ ( self ): '''simple docstring''' self._check_padding(numpify=snake_case__ ) def lowerCamelCase_ ( self ): '''simple docstring''' self._check_truncation(numpify=snake_case__ ) def lowerCamelCase_ ( self ): '''simple docstring''' self._check_truncation(numpify=snake_case__ ) @require_torch def lowerCamelCase_ ( self ): '''simple docstring''' lowerCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''longest''' , return_tensors='''np''' )[input_name] lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''longest''' , return_tensors='''pt''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def lowerCamelCase_ ( self ): '''simple docstring''' lowerCAmelCase_ = self.feature_extraction_class(**self.feat_extract_dict ) lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''longest''' , return_tensors='''np''' )[input_name] lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''longest''' , return_tensors='''tf''' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def lowerCamelCase_ 
( self ): '''simple docstring''' lowerCAmelCase_ = self.feat_extract_dict lowerCAmelCase_ = True lowerCAmelCase_ = self.feature_extraction_class(**snake_case__ ) lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase_ = [len(snake_case__ ) for x in speech_inputs] lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase_ = feat_extract.pad(snake_case__ , padding='''longest''' , return_tensors='''np''' ) self.assertIn('''attention_mask''' , snake_case__ ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , snake_case__ ) def lowerCamelCase_ ( self ): '''simple docstring''' lowerCAmelCase_ = self.feat_extract_dict lowerCAmelCase_ = True lowerCAmelCase_ = self.feature_extraction_class(**snake_case__ ) lowerCAmelCase_ = self.feat_extract_tester.prepare_inputs_for_common() lowerCAmelCase_ = [len(snake_case__ ) for x in speech_inputs] lowerCAmelCase_ = feat_extract.model_input_names[0] lowerCAmelCase_ = BatchFeature({input_name: speech_inputs} ) lowerCAmelCase_ = min(snake_case__ ) lowerCAmelCase_ = feat_extract.pad( snake_case__ , padding='''max_length''' , max_length=snake_case__ , truncation=snake_case__ , return_tensors='''np''' ) self.assertIn('''attention_mask''' , snake_case__ ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
code_codestyle: 552

style_context:
"""simple docstring""" import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def lowerCamelCase (a_ :int) -> List[str]: random.seed(a_) np.random.seed(a_) torch.manual_seed(a_) torch.cuda.manual_seed_all(a_) # ^^ safe to call this function even if cuda is not available class __magic_name__ : def __init__( self : Optional[Any] , snake_case__ : Iterable[torch.nn.Parameter] , snake_case__ : float = 0.99_99 , snake_case__ : float = 0.0 , snake_case__ : int = 0 , snake_case__ : bool = False , snake_case__ : Union[float, int] = 1.0 , snake_case__ : Union[float, int] = 2 / 3 , snake_case__ : Optional[Any] = None , snake_case__ : Dict[str, Any] = None , **snake_case__ : Tuple , ): '''simple docstring''' if isinstance(snake_case__ , torch.nn.Module ): lowercase :int = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , ) lowercase :Dict = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility lowercase :Optional[Any] = True if kwargs.get('''max_value''' , snake_case__ ) is not None: lowercase :Optional[Any] = '''The `max_value` argument is deprecated. Please use `decay` instead.''' deprecate('''max_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) lowercase :Optional[int] = kwargs['''max_value'''] if kwargs.get('''min_value''' , snake_case__ ) is not None: lowercase :List[Any] = '''The `min_value` argument is deprecated. Please use `min_decay` instead.''' deprecate('''min_value''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) lowercase :str = kwargs['''min_value'''] lowercase :Any = list(snake_case__ ) lowercase :Optional[Any] = [p.clone().detach() for p in parameters] if kwargs.get('''device''' , snake_case__ ) is not None: lowercase :str = '''The `device` argument is deprecated. 
Please use `to` instead.''' deprecate('''device''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ ) self.to(device=kwargs['''device'''] ) lowercase :int = None lowercase :int = decay lowercase :Union[str, Any] = min_decay lowercase :List[Any] = update_after_step lowercase :Union[str, Any] = use_ema_warmup lowercase :Any = inv_gamma lowercase :Any = power lowercase :str = 0 lowercase :int = None # set in `step()` lowercase :List[str] = model_cls lowercase :Any = model_config @classmethod def __snake_case ( cls : int , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase , lowercase :int = model_cls.load_config(snake_case__ , return_unused_kwargs=snake_case__ ) lowercase :List[Any] = model_cls.from_pretrained(snake_case__ ) lowercase :Optional[int] = cls(model.parameters() , model_cls=snake_case__ , model_config=model.config ) ema_model.load_state_dict(snake_case__ ) return ema_model def __snake_case ( self : int , snake_case__ : Union[str, Any] ): '''simple docstring''' if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) lowercase :Dict = self.model_cls.from_config(self.model_config ) lowercase :Tuple = self.state_dict() state_dict.pop('''shadow_params''' , snake_case__ ) model.register_to_config(**snake_case__ ) self.copy_to(model.parameters() ) model.save_pretrained(snake_case__ ) def __snake_case ( self : int , snake_case__ : int ): '''simple docstring''' lowercase :Union[str, Any] = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: lowercase :int = 1 - (1 + step / self.inv_gamma) ** -self.power else: lowercase :Dict = (1 + step) / (1_0 + step) lowercase :Optional[int] = min(snake_case__ , self.decay ) # make sure decay is not smaller than min_decay lowercase :Optional[int] = max(snake_case__ , self.min_decay ) return cur_decay_value @torch.no_grad() def __snake_case ( self : Any , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' if isinstance(snake_case__ , torch.nn.Module ): lowercase :Tuple = ( '''Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. ''' '''Please pass the parameters of the module instead.''' ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ , ) lowercase :Union[str, Any] = parameters.parameters() lowercase :Optional[Any] = list(snake_case__ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
lowercase :List[Any] = self.get_decay(self.optimization_step ) lowercase :Optional[Any] = decay lowercase :List[Any] = 1 - decay lowercase :List[str] = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , snake_case__ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): lowercase :Union[str, Any] = deepspeed.zero.GatheredParameters(snake_case__ , modifier_rank=snake_case__ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(snake_case__ ) def __snake_case ( self : str , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' lowercase :Optional[Any] = list(snake_case__ ) for s_param, param in zip(self.shadow_params , snake_case__ ): param.data.copy_(s_param.to(param.device ).data ) def __snake_case ( self : Optional[int] , snake_case__ : Dict=None , snake_case__ : Dict=None ): '''simple docstring''' lowercase :str = [ p.to(device=snake_case__ , dtype=snake_case__ ) if p.is_floating_point() else p.to(device=snake_case__ ) for p in self.shadow_params ] def __snake_case ( self : Dict ): '''simple docstring''' return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def __snake_case ( self : Optional[int] , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' lowercase :str = [param.detach().cpu().clone() for param in parameters] def __snake_case ( self : List[Any] , snake_case__ : Iterable[torch.nn.Parameter] ): '''simple docstring''' if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in zip(self.temp_stored_params , snake_case__ ): param.data.copy_(c_param.data ) # Better memory-wise. 
lowercase :Dict = None def __snake_case ( self : Union[str, Any] , snake_case__ : dict ): '''simple docstring''' lowercase :List[str] = copy.deepcopy(snake_case__ ) lowercase :Any = state_dict.get('''decay''' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) lowercase :int = state_dict.get('''min_decay''' , self.min_decay ) if not isinstance(self.min_decay , snake_case__ ): raise ValueError('''Invalid min_decay''' ) lowercase :List[Any] = state_dict.get('''optimization_step''' , self.optimization_step ) if not isinstance(self.optimization_step , snake_case__ ): raise ValueError('''Invalid optimization_step''' ) lowercase :int = state_dict.get('''update_after_step''' , self.update_after_step ) if not isinstance(self.update_after_step , snake_case__ ): raise ValueError('''Invalid update_after_step''' ) lowercase :Optional[int] = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , snake_case__ ): raise ValueError('''Invalid use_ema_warmup''' ) lowercase :Any = state_dict.get('''inv_gamma''' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('''Invalid inv_gamma''' ) lowercase :Dict = state_dict.get('''power''' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('''Invalid power''' ) lowercase :Optional[int] = state_dict.get('''shadow_params''' , snake_case__ ) if shadow_params is not None: lowercase :List[Any] = shadow_params if not isinstance(self.shadow_params , snake_case__ ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(snake_case__ , torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
style_context_codestyle: 677
label: 0

code:
'''simple docstring''' import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowercase_ : """simple docstring""" def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ): """simple docstring""" return None class lowercase_ : """simple docstring""" def lowerCAmelCase_ ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : List[Any] ): """simple docstring""" return None class lowercase_ ( unittest.TestCase ): """simple docstring""" lowerCamelCase_ = [ # (model_name, model_kwargs) ("bert-base-cased", {}), ("gpt2", {"use_cache": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def lowerCAmelCase_ ( self : Dict ): """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(snake_case__ , "tf" , 1_2 , **snake_case__ ) @require_torch @slow def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(snake_case__ , "pt" , 1_2 , **snake_case__ ) @require_torch @slow def lowerCAmelCase_ ( self : List[str] ): """simple docstring""" from transformers import BertModel _SCREAMING_SNAKE_CASE = ['''[UNK]''', '''[SEP]''', '''[CLS]''', '''[PAD]''', '''[MASK]''', '''some''', '''other''', '''words'''] with NamedTemporaryFile(mode="w+t" ) as vocab_file: vocab_file.write("\n".join(snake_case__ ) ) vocab_file.flush() _SCREAMING_SNAKE_CASE = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: _SCREAMING_SNAKE_CASE = BertModel(BertConfig(vocab_size=len(snake_case__ ) ) ) model.save_pretrained(snake_case__ ) self._test_export(snake_case__ , "pt" , 1_2 , snake_case__ ) @require_tf @slow def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: _SCREAMING_SNAKE_CASE = self._test_export(snake_case__ , "tf" , 1_2 , **snake_case__ ) _SCREAMING_SNAKE_CASE = quantize(Path(snake_case__ ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(snake_case__ ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) @require_torch @slow def lowerCAmelCase_ ( self : int ): """simple docstring""" for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: _SCREAMING_SNAKE_CASE = self._test_export(snake_case__ , "pt" , 1_2 , **snake_case__ ) _SCREAMING_SNAKE_CASE = quantize(snake_case__ ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(snake_case__ ).stat().st_size: self.fail("Quantized model is bigger than initial ONNX model" ) def lowerCAmelCase_ ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : Dict=None , **__lowerCamelCase : Dict ): """simple docstring""" try: # Compute path with TemporaryDirectory() as tempdir: _SCREAMING_SNAKE_CASE = Path(snake_case__ ).joinpath("model.onnx" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # 
Export convert(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , **snake_case__ ) return path except Exception as e: self.fail(snake_case__ ) @require_torch @require_tokenizers @slow def lowerCAmelCase_ ( self : str ): """simple docstring""" from transformers import BertModel _SCREAMING_SNAKE_CASE = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) _SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(snake_case__ , snake_case__ , "pt" ) @require_tf @require_tokenizers @slow def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" from transformers import TFBertModel _SCREAMING_SNAKE_CASE = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random" ) ) _SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random" ) self._test_infer_dynamic_axis(snake_case__ , snake_case__ , "tf" ) def lowerCAmelCase_ ( self : Dict , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Tuple ): """simple docstring""" _SCREAMING_SNAKE_CASE = FeatureExtractionPipeline(snake_case__ , snake_case__ ) _SCREAMING_SNAKE_CASE = ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''output_0''', '''output_1'''] _SCREAMING_SNAKE_CASE = infer_shapes(snake_case__ , snake_case__ ) # Assert all variables are present self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , snake_case__ ) self.assertSequenceEqual(variable_names[3:] , snake_case__ ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: "batch", 1: "sequence"} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["output_0"] , {0: "batch", 1: "sequence"} ) self.assertDictEqual(shapes["output_1"] , {0: "batch"} ) def lowerCAmelCase_ ( self : Any ): """simple docstring""" _SCREAMING_SNAKE_CASE = ['''input_ids''', '''attention_mask''', '''token_type_ids'''] _SCREAMING_SNAKE_CASE = {'''input_ids''': [1, 2, 3, 4], '''attention_mask''': [0, 0, 0, 0], '''token_type_ids''': [1, 1, 1, 1]} _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncContiguousArgs() , snake_case__ , snake_case__ ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(snake_case__ ) , 3 ) # Should have exactly the same input names self.assertEqual(set(snake_case__ ) , set(snake_case__ ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(snake_case__ , (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) _SCREAMING_SNAKE_CASE = ensure_valid_input(FuncNonContiguousArgs() , snake_case__ , snake_case__ ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(snake_case__ ) , 1 ) self.assertEqual(len(snake_case__ ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["input_ids"] ) self.assertEqual(ordered_input_names[0] , "input_ids" ) def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" _SCREAMING_SNAKE_CASE = generate_identified_filename(Path("/home/something/my_fake_model.onnx" ) , "-test" ) 
self.assertEqual("/home/something/my_fake_model-test.onnx" , generated.as_posix() )
code_codestyle: 418

style_context:
"""simple docstring""" import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase (a_ :int , a_ :Union[str, Any] , a_ :List[Any]) -> List[str]: return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :] def lowerCamelCase (a_ :Optional[Any] , a_ :Optional[int] , a_ :str , a_ :Any="attention") -> Optional[int]: lowercase :Tuple = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :]) lowercase :int = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2]) lowercase :str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :]) lowercase :Any = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2]) lowercase :int = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :]) lowercase :List[str] = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2]) lowercase :List[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :]) lowercase :Optional[int] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2]) return k, o, q, v def lowerCamelCase (a_ :Any , a_ :Union[str, Any] , a_ :Union[str, Any] , a_ :Union[str, Any]=False) -> List[Any]: if split_mlp_wi: lowercase :List[Any] = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :] lowercase :Optional[int] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :] lowercase :Dict = (wi_a, wi_a) else: lowercase :Optional[Any] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :] lowercase :Union[str, Any] = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :] return wi, wo def lowerCamelCase (a_ :Any , a_ :Optional[Any] , a_ :Optional[Any] , a_ :Union[str, Any]) -> Optional[Any]: return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i] def lowerCamelCase (a_ :dict , *, a_ :int , a_ :bool , a_ :bool = False) -> int: lowercase :Dict = traverse_util.flatten_dict(variables['''target''']) lowercase :Optional[Any] = {'''/'''.join(a_): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi lowercase :str = '''encoder/encoder/mlp/wi_0/kernel''' in old print('''Split MLP:''' , a_) lowercase :str = collections.OrderedDict() # Shared embeddings. lowercase :int = old['''token_embedder/embedding'''] # Encoder. for i in range(a_): # Block i, layer 0 (Self Attention). lowercase :Union[str, Any] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_attention_layer_norm''') lowercase , lowercase , lowercase , lowercase :Tuple = tax_attention_lookup(a_ , a_ , '''encoder''' , '''attention''') lowercase :Dict = layer_norm lowercase :Dict = k.T lowercase :Union[str, Any] = o.T lowercase :List[Any] = q.T lowercase :int = v.T # Block i, layer 1 (MLP). 
lowercase :Optional[int] = tax_layer_norm_lookup(a_ , a_ , '''encoder''' , '''pre_mlp_layer_norm''') lowercase , lowercase :str = tax_mlp_lookup(a_ , a_ , '''encoder''' , a_) lowercase :int = layer_norm if split_mlp_wi: lowercase :Tuple = wi[0].T lowercase :Tuple = wi[1].T else: lowercase :int = wi.T lowercase :Tuple = wo.T if scalable_attention: # convert the rel_embedding of each layer lowercase :Dict = tax_relpos_bias_lookup( a_ , a_ , '''encoder''').T lowercase :str = old['''encoder/encoder_norm/scale'''] if not scalable_attention: lowercase :str = tax_relpos_bias_lookup( a_ , 0 , '''encoder''').T lowercase :List[Any] = tax_relpos_bias_lookup( a_ , 0 , '''decoder''').T if not is_encoder_only: # Decoder. for i in range(a_): # Block i, layer 0 (Self Attention). lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_self_attention_layer_norm''') lowercase , lowercase , lowercase , lowercase :str = tax_attention_lookup(a_ , a_ , '''decoder''' , '''self_attention''') lowercase :List[str] = layer_norm lowercase :Dict = k.T lowercase :List[Any] = o.T lowercase :List[Any] = q.T lowercase :Any = v.T # Block i, layer 1 (Cross Attention). lowercase :Tuple = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_cross_attention_layer_norm''') lowercase , lowercase , lowercase , lowercase :int = tax_attention_lookup(a_ , a_ , '''decoder''' , '''encoder_decoder_attention''') lowercase :int = layer_norm lowercase :Dict = k.T lowercase :int = o.T lowercase :List[Any] = q.T lowercase :Tuple = v.T # Block i, layer 2 (MLP). lowercase :Any = tax_layer_norm_lookup(a_ , a_ , '''decoder''' , '''pre_mlp_layer_norm''') lowercase , lowercase :Tuple = tax_mlp_lookup(a_ , a_ , '''decoder''' , a_) lowercase :Any = layer_norm if split_mlp_wi: lowercase :int = wi[0].T lowercase :Union[str, Any] = wi[1].T else: lowercase :int = wi.T lowercase :List[Any] = wo.T if scalable_attention: # convert the rel_embedding of each layer lowercase :Union[str, Any] = tax_relpos_bias_lookup(a_ , a_ , '''decoder''').T lowercase :Union[str, Any] = old['''decoder/decoder_norm/scale'''] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: lowercase :int = old['''decoder/logits_dense/kernel'''].T return new def lowerCamelCase (a_ :Dict , a_ :bool) -> Tuple: lowercase :str = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: lowercase :Any = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: lowercase :Optional[Any] = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print('''Using shared word embeddings as lm_head.''') lowercase :Optional[int] = state_dict['''shared.weight'''] return state_dict def lowerCamelCase (a_ :List[str] , a_ :List[str] , a_ :Tuple , a_ :Optional[int] , a_ :List[str]) -> List[str]: lowercase :Optional[Any] = checkpoints.load_tax_checkpoint(a_) lowercase :Optional[int] = convert_tax_to_pytorch( a_ , num_layers=config.num_layers , is_encoder_only=a_ , scalable_attention=a_) lowercase :Union[str, Any] = make_state_dict(a_ , a_) model.load_state_dict(a_ , strict=a_) def lowerCamelCase (a_ :str , a_ :Optional[int] , a_ :Any , a_ :bool = False , a_ :bool = False , ) -> Tuple: lowercase :Optional[int] = MTaConfig.from_json_file(a_) print(F"""Building PyTorch model from configuration: {config}""") # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: lowercase :Union[str, Any] = UMTaEncoderModel(a_) else: lowercase :int = UMTaForConditionalGeneration(a_) # Load weights from tf checkpoint load_tax_weights_in_ta(a_ , a_ , a_ , a_ , a_) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""") model.save_pretrained(a_) # Verify that we can load the checkpoint. model.from_pretrained(a_) print('''Done''') if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''') # Required parameters parser.add_argument( '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''', ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) parser.add_argument( '''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False ) parser.add_argument( '''--scalable_attention''', action='''store_true''', help='''Whether the model uses scaled attention (umt5 model)''', default=False, ) UpperCAmelCase = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
style_context_codestyle: 677
label: 0

code:
'''simple docstring''' import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class a ( __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase : List[Any] = GPTaTokenizer __lowerCAmelCase : List[Any] = GPTaTokenizerFast __lowerCAmelCase : Union[str, Any] = True __lowerCAmelCase : Union[str, Any] = {"add_prefix_space": True} __lowerCAmelCase : Tuple = False def __UpperCamelCase ( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _a : List[str] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] _a : int = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) _a : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _a : Optional[int] = {'''unk_token''': '''<unk>'''} _a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) _a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(snake_case__ ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(snake_case__ ) ) def __UpperCamelCase ( self , **lowerCamelCase_ ) -> List[Any]: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **snake_case__ ) def __UpperCamelCase ( self , **lowerCamelCase_ ) -> Dict: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **snake_case__ ) def __UpperCamelCase ( self , lowerCamelCase_ ) -> Tuple: _a : List[Any] = '''lower newer''' _a : Optional[int] = '''lower newer''' return input_text, output_text def __UpperCamelCase ( self ) -> Optional[Any]: _a : str = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _a : Union[str, Any] = '''lower newer''' _a : List[str] = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _a : Optional[Any] = tokenizer.tokenize(snake_case__ , add_prefix_space=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) _a : List[str] = tokens + [tokenizer.unk_token] _a : Union[str, Any] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) def __UpperCamelCase ( self ) -> List[str]: if not self.test_rust_tokenizer: return _a : Optional[int] = self.get_tokenizer() _a : Tuple = self.get_rust_tokenizer(add_prefix_space=snake_case__ ) _a : Dict = '''lower newer''' # Testing tokenization _a : int = tokenizer.tokenize(snake_case__ , add_prefix_space=snake_case__ ) _a : int = rust_tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) # Testing conversion to ids without special tokens _a : str = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ , add_prefix_space=snake_case__ ) _a : Optional[int] = rust_tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) # Testing conversion to ids 
with special tokens _a : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=snake_case__ ) _a : Tuple = tokenizer.encode(snake_case__ , add_prefix_space=snake_case__ ) _a : str = rust_tokenizer.encode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) # Testing the unknown token _a : int = tokens + [rust_tokenizer.unk_token] _a : Tuple = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) def __UpperCamelCase ( self , *lowerCamelCase_ , **lowerCamelCase_ ) -> Optional[Any]: pass def __UpperCamelCase ( self , lowerCamelCase_=1_5 ) -> int: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): _a : Optional[int] = self.rust_tokenizer_class.from_pretrained(snake_case__ , **snake_case__ ) # Simple input _a : Optional[int] = '''This is a simple input''' _a : Tuple = ['''This is a simple input 1''', '''This is a simple input 2'''] _a : List[Any] = ('''This is a simple input''', '''This is a pair''') _a : int = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(snake_case__ , tokenizer_r.encode , snake_case__ , max_length=snake_case__ , padding='max_length' ) # Simple input self.assertRaises(snake_case__ , tokenizer_r.encode_plus , snake_case__ , max_length=snake_case__ , padding='max_length' ) # Simple input self.assertRaises( snake_case__ , tokenizer_r.batch_encode_plus , snake_case__ , max_length=snake_case__ , padding='max_length' , ) # Pair input self.assertRaises(snake_case__ , tokenizer_r.encode , snake_case__ , max_length=snake_case__ , padding='max_length' ) # Pair input self.assertRaises(snake_case__ , tokenizer_r.encode_plus , snake_case__ , max_length=snake_case__ , padding='max_length' ) # Pair input self.assertRaises( snake_case__ , tokenizer_r.batch_encode_plus , snake_case__ , max_length=snake_case__ , padding='max_length' , ) def __UpperCamelCase ( self ) -> int: _a : List[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' ) # Simple input _a : Tuple = '''This is a simple input''' _a : Tuple = ['''This is a simple input looooooooong''', '''This is a simple input'''] _a : Optional[Any] = ('''This is a simple input''', '''This is a pair''') _a : Optional[int] = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] _a : str = tokenizer.pad_token_id _a : Optional[Any] = tokenizer(snake_case__ , padding='max_length' , max_length=3_0 , return_tensors='np' ) _a : Optional[int] = tokenizer(snake_case__ , padding=snake_case__ , truncate=snake_case__ , return_tensors='np' ) _a : Dict = tokenizer(*snake_case__ , padding='max_length' , max_length=6_0 , return_tensors='np' ) _a : List[Any] = tokenizer(snake_case__ , padding=snake_case__ , truncate=snake_case__ , return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in 
out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def __UpperCamelCase ( self ) -> Optional[int]: _a : Any = '''$$$''' _a : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case__ , add_bos_token=snake_case__ ) _a : List[Any] = '''This is a simple input''' _a : str = ['''This is a simple input 1''', '''This is a simple input 2'''] _a : Any = tokenizer.bos_token_id _a : Union[str, Any] = tokenizer(snake_case__ ) _a : Optional[int] = tokenizer(snake_case__ ) self.assertEqual(out_s.input_ids[0] , snake_case__ ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _a : Dict = tokenizer.decode(out_s.input_ids ) _a : Any = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , snake_case__ ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __UpperCamelCase ( self ) -> Dict: pass def __UpperCamelCase ( self ) -> Dict: _a : Optional[Any] = [self.get_tokenizer(do_lower_case=snake_case__ , add_bos_token=snake_case__ )] for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): _a : Optional[int] = '''Encode this.''' _a : Dict = '''This one too please.''' _a : Optional[Any] = tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) encoded_sequence += tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) _a : Union[str, Any] = tokenizer.encode_plus( snake_case__ , snake_case__ , add_special_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , ) _a : Any = encoded_sequence_dict['''input_ids'''] _a : Any = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) _a : List[str] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(snake_case__ ) ] _a : int = [x for x in filtered_sequence if x is not None] self.assertEqual(snake_case__ , snake_case__ ) @require_tokenizers class a ( unittest.TestCase ): '''simple docstring''' def __UpperCamelCase ( self ) -> Union[str, Any]: _a : List[str] = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=snake_case__ ) _a : Any = '''A photo of a cat''' _a : Tuple = tokenizer.encode( snake_case__ , ) self.assertEqual(snake_case__ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('test_opt' ) _a : Optional[Any] = AutoTokenizer.from_pretrained('./test_opt' ) _a : Union[str, Any] = tokenizer.encode( snake_case__ , ) self.assertEqual(snake_case__ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def __UpperCamelCase ( self ) -> List[Any]: _a : Dict = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=snake_case__ ) _a : Union[str, Any] = '''A photo of a cat''' _a : Tuple = tokenizer.encode( snake_case__ , ) # Same as above self.assertEqual(snake_case__ , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip('This test is failing because of a bug in the fast tokenizer' ) def __UpperCamelCase ( self ) -> Dict: _a : List[Any] = 
AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=snake_case__ ) _a : Tuple = '''bos''' _a : List[str] = tokenizer.get_vocab()['''bos'''] _a : Tuple = '''A photo of a cat''' _a : Optional[Any] = tokenizer.encode( snake_case__ , ) # We changed the bos token self.assertEqual(snake_case__ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('./tok' ) _a : str = AutoTokenizer.from_pretrained('./tok' ) self.assertTrue(tokenizer.is_fast ) _a : Optional[Any] = tokenizer.encode( snake_case__ , ) self.assertEqual(snake_case__ , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
code_codestyle: 120

style_context:
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = { '''configuration_blenderbot''': [ '''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BlenderbotConfig''', '''BlenderbotOnnxConfig''', ], '''tokenization_blenderbot''': ['''BlenderbotTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ['''BlenderbotTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BlenderbotForCausalLM''', '''BlenderbotForConditionalGeneration''', '''BlenderbotModel''', '''BlenderbotPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''TFBlenderbotForConditionalGeneration''', '''TFBlenderbotModel''', '''TFBlenderbotPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''FlaxBlenderbotForConditionalGeneration''', '''FlaxBlenderbotModel''', '''FlaxBlenderbotPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_blenderbot import ( BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotOnnxConfig, ) from .tokenization_blenderbot import BlenderbotTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_fast import BlenderbotTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, BlenderbotPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot import ( TFBlenderbotForConditionalGeneration, TFBlenderbotModel, TFBlenderbotPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot import ( FlaxBlenderbotForConditionalGeneration, FlaxBlenderbotModel, FlaxBlenderbotPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
style_context_codestyle: 677
label: 0
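In every complete row shown here the two codestyle ids differ (552/677, 418/677, 120/677) and label is 0. That pattern suggests, purely as an inference from these three rows and not something the dump states, that label may mark whether code and style_context share a codestyle:

```python
# Inferred from the visible rows only -- not documented in this dump:
# label may be 1 exactly when the two codestyle ids match.
def inferred_label(code_codestyle: int, style_context_codestyle: int) -> int:
    return int(code_codestyle == style_context_codestyle)
```

The final blob is the code field of a fourth row, truncated in the source dump.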
from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=0.0 , __lowerCAmelCase = None , __lowerCAmelCase = "geglu" , __lowerCAmelCase = None , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = True , __lowerCAmelCase = "layer_norm" , __lowerCAmelCase = False , ): """simple docstring""" super().__init__() __magic_name__ :Any = only_cross_attention __magic_name__ :Dict = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' __magic_name__ :Dict = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'''`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to''' F''' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.''' ) # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: __magic_name__ :Any = AdaLayerNorm(snake_case__ , snake_case__ ) elif self.use_ada_layer_norm_zero: __magic_name__ :Tuple = AdaLayerNormZero(snake_case__ , snake_case__ ) else: __magic_name__ :str = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) __magic_name__ :Any = Attention( query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. __magic_name__ :Dict = ( AdaLayerNorm(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) ) __magic_name__ :Dict = Attention( query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none else: __magic_name__ :List[str] = None __magic_name__ :int = None # 3. 
Feed-forward __magic_name__ :Dict = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) __magic_name__ :int = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ ) # let chunk size default to None __magic_name__ :str = None __magic_name__ :Any = 0 def A ( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :int = chunk_size __magic_name__ :Tuple = dim def A ( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = None , ): """simple docstring""" if self.use_ada_layer_norm: __magic_name__ :int = self.norma(snake_case__ , snake_case__ ) elif self.use_ada_layer_norm_zero: __magic_name__ :Union[str, Any] = self.norma( snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype ) else: __magic_name__ :str = self.norma(snake_case__ ) __magic_name__ :List[str] = cross_attention_kwargs if cross_attention_kwargs is not None else {} __magic_name__ :List[Any] = self.attna( snake_case__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **snake_case__ , ) if self.use_ada_layer_norm_zero: __magic_name__ :Any = gate_msa.unsqueeze(1 ) * attn_output __magic_name__ :int = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: __magic_name__ :int = ( self.norma(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norma(snake_case__ ) ) __magic_name__ :List[str] = self.attna( snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **snake_case__ , ) __magic_name__ :Optional[int] = attn_output + hidden_states # 3. Feed-forward __magic_name__ :List[Any] = self.norma(snake_case__ ) if self.use_ada_layer_norm_zero: __magic_name__ :List[str] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'''`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.''' ) __magic_name__ :Any = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size __magic_name__ :Tuple = torch.cat( [self.ff(snake_case__ ) for hid_slice in norm_hidden_states.chunk(snake_case__ , dim=self._chunk_dim )] , dim=self._chunk_dim , ) else: __magic_name__ :str = self.ff(snake_case__ ) if self.use_ada_layer_norm_zero: __magic_name__ :List[Any] = gate_mlp.unsqueeze(1 ) * ff_output __magic_name__ :List[str] = ff_output + hidden_states return hidden_states class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 4 , __lowerCAmelCase = 0.0 , __lowerCAmelCase = "geglu" , __lowerCAmelCase = False , ): """simple docstring""" super().__init__() __magic_name__ :List[Any] = int(dim * mult ) __magic_name__ :Optional[Any] = dim_out if dim_out is not None else dim if activation_fn == "gelu": __magic_name__ :Optional[int] = GELU(snake_case__ , snake_case__ ) if activation_fn == "gelu-approximate": __magic_name__ :List[Any] = GELU(snake_case__ , snake_case__ , approximate='''tanh''' ) elif activation_fn == "geglu": __magic_name__ :Optional[int] = GEGLU(snake_case__ , snake_case__ ) elif activation_fn == "geglu-approximate": __magic_name__ :List[str] = ApproximateGELU(snake_case__ , snake_case__ ) __magic_name__ :Any = nn.ModuleList([] ) # project in self.net.append(snake_case__ ) # project dropout self.net.append(nn.Dropout(snake_case__ ) ) # project out self.net.append(nn.Linear(snake_case__ , snake_case__ ) ) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(snake_case__ ) ) def A ( self , __lowerCAmelCase ): """simple docstring""" for module in self.net: __magic_name__ :List[str] = module(snake_case__ ) return hidden_states class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = "none" ): """simple docstring""" super().__init__() __magic_name__ :Any = nn.Linear(snake_case__ , snake_case__ ) __magic_name__ :Optional[Any] = approximate def A ( self , __lowerCAmelCase ): """simple docstring""" if gate.device.type != "mps": return F.gelu(snake_case__ , approximate=self.approximate ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype ) def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[Any] = self.proj(snake_case__ ) __magic_name__ :Optional[int] = self.gelu(snake_case__ ) return hidden_states class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" super().__init__() __magic_name__ :Union[str, Any] = nn.Linear(snake_case__ , dim_out * 2 ) def A ( self , __lowerCAmelCase ): """simple docstring""" if gate.device.type != "mps": return F.gelu(snake_case__ ) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype ) def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Dict = self.proj(snake_case__ ).chunk(2 , dim=-1 ) return hidden_states * self.gelu(snake_case__ ) class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" super().__init__() __magic_name__ :Union[str, Any] = nn.Linear(snake_case__ , snake_case__ ) def A ( self , __lowerCAmelCase ): """simple docstring""" __magic_name__ :List[str] = 
self.proj(snake_case__ ) return x * torch.sigmoid(1.702 * x ) class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" super().__init__() __magic_name__ :Optional[Any] = nn.Embedding(snake_case__ , snake_case__ ) __magic_name__ :str = nn.SiLU() __magic_name__ :Tuple = nn.Linear(snake_case__ , embedding_dim * 2 ) __magic_name__ :str = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ ) def A ( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" __magic_name__ :Tuple = self.linear(self.silu(self.emb(snake_case__ ) ) ) __magic_name__ :Any = torch.chunk(snake_case__ , 2 ) __magic_name__ :int = self.norm(snake_case__ ) * (1 + scale) + shift return x class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" super().__init__() __magic_name__ :Any = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ ) __magic_name__ :List[Any] = nn.SiLU() __magic_name__ :Tuple = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ ) __magic_name__ :Dict = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1E-6 ) def A ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ): """simple docstring""" __magic_name__ :Union[str, Any] = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) ) __magic_name__ :List[Any] = emb.chunk(6 , dim=1 ) __magic_name__ :Any = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class lowerCamelCase_ ( nn.Module ): def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = 1E-5 ): """simple docstring""" super().__init__() __magic_name__ :str = num_groups __magic_name__ :int = eps if act_fn is None: __magic_name__ :List[Any] = None else: __magic_name__ :Optional[int] = get_activation(snake_case__ ) __magic_name__ :Optional[Any] = nn.Linear(snake_case__ , out_dim * 2 ) def A ( self , __lowerCAmelCase , __lowerCAmelCase ): """simple docstring""" if self.act: __magic_name__ :Optional[Any] = self.act(snake_case__ ) __magic_name__ :Optional[Any] = self.linear(snake_case__ ) __magic_name__ :Optional[int] = emb[:, :, None, None] __magic_name__ :Any = emb.chunk(2 , dim=1 ) __magic_name__ :str = F.group_norm(snake_case__ , self.num_groups , eps=self.eps ) __magic_name__ :str = x * (1 + scale) + shift return x
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''', # See all Donut models at https://huggingface.co/models?filter=donut-swin } class __magic_name__ ( __UpperCAmelCase ): __A : Tuple = "donut-swin" __A : Optional[Any] = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self : List[str] , snake_case__ : Any=2_2_4 , snake_case__ : Tuple=4 , snake_case__ : str=3 , snake_case__ : Dict=9_6 , snake_case__ : Optional[Any]=[2, 2, 6, 2] , snake_case__ : Any=[3, 6, 1_2, 2_4] , snake_case__ : List[str]=7 , snake_case__ : Dict=4.0 , snake_case__ : str=True , snake_case__ : Optional[int]=0.0 , snake_case__ : Tuple=0.0 , snake_case__ : Any=0.1 , snake_case__ : List[str]="gelu" , snake_case__ : Tuple=False , snake_case__ : int=0.02 , snake_case__ : Optional[Any]=1e-5 , **snake_case__ : Any , ): '''simple docstring''' super().__init__(**snake_case__ ) lowercase :Union[str, Any] = image_size lowercase :Optional[Any] = patch_size lowercase :List[str] = num_channels lowercase :Optional[int] = embed_dim lowercase :Optional[Any] = depths lowercase :List[Any] = len(snake_case__ ) lowercase :Optional[Any] = num_heads lowercase :int = window_size lowercase :str = mlp_ratio lowercase :Optional[int] = qkv_bias lowercase :Dict = hidden_dropout_prob lowercase :Any = attention_probs_dropout_prob lowercase :Any = drop_path_rate lowercase :int = hidden_act lowercase :int = use_absolute_embeddings lowercase :List[str] = layer_norm_eps lowercase :Union[str, Any] = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowercase :str = int(embed_dim * 2 ** (len(snake_case__ ) - 1) )
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool


class TextSummarizationTool(PipelineTool):
    default_checkpoint = "philschmid/bart-large-cnn-samsum"
    description = (
        "This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, "
        "and returns a summary of the text."
    )
    name = "summarizer"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM

    inputs = ["text"]
    outputs = ["text"]

    def encode(self, text):
        return self.pre_processor(text, return_tensors="pt", truncation=True)

    def forward(self, inputs):
        return self.model.generate(**inputs)[0]

    def decode(self, outputs):
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
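A minimal usage sketch, assuming the `PipelineTool` base class in `.base` chains `encode`, `forward`, and `decode` when the tool instance is called (that wiring lives in the base class, not in this file), and that the checkpoint is downloaded on first use:

# Hypothetical usage; requires network access for the checkpoint download.
summarizer = TextSummarizationTool()
dialogue = "Philipp: Can you summarize the meeting? Anna: Sure, I'll send notes after lunch."
print(summarizer(dialogue))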
"""simple docstring""" import argparse import os import shutil from pathlib import Path import onnx import torch from packaging import version from torch.onnx import export from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline UpperCAmelCase = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''') def lowerCamelCase (a_ :Optional[int] , a_ :tuple , a_ :Path , a_ :str , a_ :int , a_ :List[Any] , a_ :Any , a_ :Union[str, Any]=False , ) -> Dict: output_path.parent.mkdir(parents=a_ , exist_ok=a_) # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11, # so we check the torch version for backwards compatibility if is_torch_less_than_1_11: export( a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , use_external_data_format=a_ , enable_onnx_checker=a_ , opset_version=a_ , ) else: export( a_ , a_ , f=output_path.as_posix() , input_names=a_ , output_names=a_ , dynamic_axes=a_ , do_constant_folding=a_ , opset_version=a_ , ) @torch.no_grad() def lowerCamelCase (a_ :str , a_ :str , a_ :int , a_ :bool = False) -> Union[str, Any]: lowercase :Any = torch.floataa if fpaa else torch.floataa if fpaa and torch.cuda.is_available(): lowercase :Union[str, Any] = '''cuda''' elif fpaa and not torch.cuda.is_available(): raise ValueError('''`float16` model export is only supported on GPUs with CUDA''') else: lowercase :List[str] = '''cpu''' lowercase :List[str] = StableDiffusionPipeline.from_pretrained(a_ , torch_dtype=a_).to(a_) lowercase :List[Any] = Path(a_) # TEXT ENCODER lowercase :List[Any] = pipeline.text_encoder.config.max_position_embeddings lowercase :Dict = pipeline.text_encoder.config.hidden_size lowercase :Union[str, Any] = pipeline.tokenizer( '''A sample prompt''' , padding='''max_length''' , max_length=pipeline.tokenizer.model_max_length , truncation=a_ , return_tensors='''pt''' , ) onnx_export( pipeline.text_encoder , model_args=(text_input.input_ids.to(device=a_ , dtype=torch.intaa)) , output_path=output_path / '''text_encoder''' / '''model.onnx''' , ordered_input_names=['''input_ids'''] , output_names=['''last_hidden_state''', '''pooler_output'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''sequence'''}, } , opset=a_ , ) del pipeline.text_encoder # UNET lowercase :Any = pipeline.unet.config.in_channels lowercase :List[Any] = pipeline.unet.config.sample_size lowercase :Optional[int] = output_path / '''unet''' / '''model.onnx''' onnx_export( pipeline.unet , model_args=( torch.randn(2 , a_ , a_ , a_).to(device=a_ , dtype=a_), torch.randn(2).to(device=a_ , dtype=a_), torch.randn(2 , a_ , a_).to(device=a_ , dtype=a_), False, ) , output_path=a_ , ordered_input_names=['''sample''', '''timestep''', '''encoder_hidden_states''', '''return_dict'''] , output_names=['''out_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''timestep''': {0: '''batch'''}, '''encoder_hidden_states''': {0: '''batch''', 1: '''sequence'''}, } , opset=a_ , use_external_data_format=a_ , ) lowercase :List[Any] = str(unet_path.absolute().as_posix()) lowercase :str = os.path.dirname(a_) lowercase :Optional[Any] = onnx.load(a_) # clean up existing tensor files shutil.rmtree(a_) os.mkdir(a_) # collate external tensor files into one onnx.save_model( a_ , a_ , save_as_external_data=a_ , all_tensors_to_one_file=a_ , location='''weights.pb''' , convert_attribute=a_ , ) del pipeline.unet # VAE 
ENCODER lowercase :Tuple = pipeline.vae lowercase :Optional[Any] = vae_encoder.config.in_channels lowercase :Any = vae_encoder.config.sample_size # need to get the raw tensor output (sample) from the encoder lowercase :Any = lambda a_ , a_: vae_encoder.encode(a_ , a_)[0].sample() onnx_export( a_ , model_args=( torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_), False, ) , output_path=output_path / '''vae_encoder''' / '''model.onnx''' , ordered_input_names=['''sample''', '''return_dict'''] , output_names=['''latent_sample'''] , dynamic_axes={ '''sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=a_ , ) # VAE DECODER lowercase :Any = pipeline.vae lowercase :Dict = vae_decoder.config.latent_channels lowercase :Union[str, Any] = vae_decoder.config.out_channels # forward only through the decoder part lowercase :List[Any] = vae_encoder.decode onnx_export( a_ , model_args=( torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_), False, ) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={ '''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, } , opset=a_ , ) del pipeline.vae # SAFETY CHECKER if pipeline.safety_checker is not None: lowercase :Dict = pipeline.safety_checker lowercase :str = safety_checker.config.vision_config.num_channels lowercase :str = safety_checker.config.vision_config.image_size lowercase :List[str] = safety_checker.forward_onnx onnx_export( pipeline.safety_checker , model_args=( torch.randn( 1 , a_ , a_ , a_ , ).to(device=a_ , dtype=a_), torch.randn(1 , a_ , a_ , a_).to(device=a_ , dtype=a_), ) , output_path=output_path / '''safety_checker''' / '''model.onnx''' , ordered_input_names=['''clip_input''', '''images'''] , output_names=['''out_images''', '''has_nsfw_concepts'''] , dynamic_axes={ '''clip_input''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''}, '''images''': {0: '''batch''', 1: '''height''', 2: '''width''', 3: '''channels'''}, } , opset=a_ , ) del pipeline.safety_checker lowercase :Tuple = OnnxRuntimeModel.from_pretrained(output_path / '''safety_checker''') lowercase :Optional[Any] = pipeline.feature_extractor else: lowercase :int = None lowercase :Union[str, Any] = None lowercase :Optional[int] = OnnxStableDiffusionPipeline( vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_encoder''') , vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / '''vae_decoder''') , text_encoder=OnnxRuntimeModel.from_pretrained(output_path / '''text_encoder''') , tokenizer=pipeline.tokenizer , unet=OnnxRuntimeModel.from_pretrained(output_path / '''unet''') , scheduler=pipeline.scheduler , safety_checker=a_ , feature_extractor=a_ , requires_safety_checker=safety_checker is not None , ) onnx_pipeline.save_pretrained(a_) print('''ONNX pipeline saved to''' , a_) del pipeline del onnx_pipeline lowercase :Tuple = OnnxStableDiffusionPipeline.from_pretrained(a_ , provider='''CPUExecutionProvider''') print('''ONNX pipeline is loadable''') if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--model_path''', type=str, required=True, help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''', ) parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''') parser.add_argument( '''--opset''', default=14, type=int, help='''The version 
of the ONNX operator set to use.''', ) parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''') UpperCAmelCase = parser.parse_args() convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
def merge_sort(collection: list) -> list:
    """Pure Python implementation of merge sort.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    >>> merge_sort([-2, -5, -45])
    [-45, -5, -2]
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into one sorted list."""

        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
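Usage is a one-liner. The sort runs in O(n log n), and because `_merge` prefers `left` on ties (`<=`), equal elements keep their original order, so the sort is stable:

print(merge_sort([5, 2, 9, 2, 1]))   # [1, 2, 2, 5, 9]
print(merge_sort(["b", "a", "c"]))   # ['a', 'b', 'c']  (works for any comparable items)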
"""simple docstring""" import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, TaTokenizer def lowerCamelCase (a_ :List[Any] , a_ :Union[str, Any] , a_ :Tuple , a_ :List[str] , a_ :str=True , a_ :str="pt") -> List[str]: lowercase :Optional[int] = {'''add_prefix_space''': True} if isinstance(a_ , a_) and not line.startswith(''' ''') else {} lowercase :Optional[int] = padding_side return tokenizer( [line] , max_length=a_ , padding='''max_length''' if pad_to_max_length else None , truncation=a_ , return_tensors=a_ , add_special_tokens=a_ , **a_ , ) def lowerCamelCase (a_ :str , a_ :Tuple , a_ :Optional[Any]=None , ) -> Tuple: lowercase :Optional[Any] = input_ids.ne(a_).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class __magic_name__ ( __UpperCAmelCase ): def __init__( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : str="train" , snake_case__ : Optional[Any]=None , snake_case__ : Tuple=None , snake_case__ : Any=None , snake_case__ : Dict="" , ): '''simple docstring''' super().__init__() lowercase :Tuple = Path(snake_case__ ).joinpath(type_path + '''.source''' ) lowercase :Union[str, Any] = Path(snake_case__ ).joinpath(type_path + '''.target''' ) lowercase :List[Any] = self.get_char_lens(self.src_file ) lowercase :Tuple = max_source_length lowercase :Optional[int] = max_target_length assert min(self.src_lens ) > 0, f"""found empty line in {self.src_file}""" lowercase :Any = tokenizer lowercase :Tuple = prefix if n_obs is not None: lowercase :List[str] = self.src_lens[:n_obs] lowercase :List[Any] = src_lang lowercase :str = tgt_lang def __len__( self : Any ): '''simple docstring''' return len(self.src_lens ) def __getitem__( self : str , snake_case__ : Any ): '''simple docstring''' lowercase :Optional[int] = index + 1 # linecache starts at 1 lowercase :Optional[Any] = self.prefix + linecache.getline(str(self.src_file ) , snake_case__ ).rstrip('''\n''' ) lowercase :Dict = linecache.getline(str(self.tgt_file ) , snake_case__ ).rstrip('''\n''' ) assert source_line, f"""empty source line for index {index}""" assert tgt_line, f"""empty tgt line for index {index}""" # Need to add eos token manually for T5 if isinstance(self.tokenizer , snake_case__ ): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right lowercase :Dict = ( self.tokenizer.question_encoder if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer ) lowercase :Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer lowercase :Optional[int] = encode_line(snake_case__ , snake_case__ , self.max_source_length , '''right''' ) lowercase :Tuple = encode_line(snake_case__ , snake_case__ , self.max_target_length , '''right''' ) lowercase :List[str] = source_inputs['''input_ids'''].squeeze() lowercase :Optional[Any] = target_inputs['''input_ids'''].squeeze() lowercase :List[str] = source_inputs['''attention_mask'''].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, 
"decoder_input_ids": target_ids, } @staticmethod def __snake_case ( snake_case__ : Optional[int] ): '''simple docstring''' return [len(snake_case__ ) for x in Path(snake_case__ ).open().readlines()] def __snake_case ( self : Tuple , snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase :Optional[Any] = torch.stack([x['''input_ids'''] for x in batch] ) lowercase :Tuple = torch.stack([x['''attention_mask'''] for x in batch] ) lowercase :Tuple = torch.stack([x['''decoder_input_ids'''] for x in batch] ) lowercase :str = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer.pad_token_id ) lowercase :Optional[int] = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer , snake_case__ ) else self.tokenizer.pad_token_id ) lowercase :List[Any] = trim_batch(snake_case__ , snake_case__ ) lowercase , lowercase :List[str] = trim_batch(snake_case__ , snake_case__ , attention_mask=snake_case__ ) lowercase :Optional[int] = { '''input_ids''': source_ids, '''attention_mask''': source_mask, '''decoder_input_ids''': y, } return batch UpperCAmelCase = getLogger(__name__) def lowerCamelCase (a_ :List[List]) -> Tuple: return list(itertools.chain.from_iterable(a_)) def lowerCamelCase (a_ :str) -> None: lowercase :List[str] = get_git_info() save_json(a_ , os.path.join(a_ , '''git_log.json''')) def lowerCamelCase (a_ :Optional[int] , a_ :Optional[int] , a_ :Optional[Any]=4 , **a_ :Optional[Any]) -> str: with open(a_ , '''w''') as f: json.dump(a_ , a_ , indent=a_ , **a_) def lowerCamelCase (a_ :Dict) -> Union[str, Any]: with open(a_) as f: return json.load(a_) def lowerCamelCase () -> List[str]: lowercase :Dict = git.Repo(search_parent_directories=a_) lowercase :int = { '''repo_id''': str(a_), '''repo_sha''': str(repo.head.object.hexsha), '''repo_branch''': str(repo.active_branch), '''hostname''': str(socket.gethostname()), } return repo_infos def lowerCamelCase (a_ :Callable , a_ :Iterable) -> List: return list(map(a_ , a_)) def lowerCamelCase (a_ :Optional[Any] , a_ :str) -> Any: with open(a_ , '''wb''') as f: return pickle.dump(a_ , a_) def lowerCamelCase (a_ :List[str]) -> List[str]: def remove_articles(a_ :Union[str, Any]): return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , a_) def white_space_fix(a_ :Tuple): return " ".join(text.split()) def remove_punc(a_ :int): lowercase :List[Any] = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(a_ :int): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(a_)))) def lowerCamelCase (a_ :List[str] , a_ :Any) -> List[str]: lowercase :Dict = normalize_answer(a_).split() lowercase :int = normalize_answer(a_).split() lowercase :List[Any] = Counter(a_) & Counter(a_) lowercase :Optional[int] = sum(common.values()) if num_same == 0: return 0 lowercase :str = 1.0 * num_same / len(a_) lowercase :Tuple = 1.0 * num_same / len(a_) lowercase :Tuple = (2 * precision * recall) / (precision + recall) return fa def lowerCamelCase (a_ :Tuple , a_ :Optional[Any]) -> List[Any]: return normalize_answer(a_) == normalize_answer(a_) def lowerCamelCase (a_ :List[str] , a_ :List[str]) -> Dict: assert len(a_) == len(a_) lowercase :Any = 0 for hypo, pred in zip(a_ , a_): em += exact_match_score(a_ , a_) if len(a_) > 0: em /= len(a_) return {"em": em} def lowerCamelCase (a_ :Union[str, Any]) -> Optional[Any]: return model_prefix.startswith('''rag''') def lowerCamelCase (a_ :List[str] , a_ :Tuple , a_ :List[str]) -> Any: lowercase :List[str] = {p: p for p 
in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead lowercase :str = '''dropout_rate''' for p in extra_params: if getattr(a_ , a_ , a_): if not hasattr(a_ , a_) and not hasattr(a_ , equivalent_param[p]): logger.info('''config doesn\'t have a `{}` attribute'''.format(a_)) delattr(a_ , a_) continue lowercase :List[str] = p if hasattr(a_ , a_) else equivalent_param[p] setattr(a_ , a_ , getattr(a_ , a_)) delattr(a_ , a_) return hparams, config
import os
import time

import numpy as np
import onnxruntime as ort

# TensorRT execution-provider settings
os.environ["ORT_TENSORRT_INT8_ENABLE"] = "1"  # enable INT8 precision
os.environ["ORT_TENSORRT_INT8_USE_NATIVE_CALIBRATION_TABLE"] = "0"  # use an ORT-generated calibration table
os.environ["ORT_TENSORRT_ENGINE_CACHE_ENABLE"] = "1"  # enable engine caching

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for _ in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
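The single warm-up run above matters: the first `sess.run` pays one-off costs such as TensorRT engine construction and memory-arena growth, which would otherwise skew the average. A sketch of a slightly more robust timing loop using `time.perf_counter` (monotonic and higher resolution than `time.time`) to report percentiles instead of a single mean:

import time

feeds = {
    sess.get_inputs()[0].name: input_ids,
    sess.get_inputs()[1].name: attention_mask,
    sess.get_inputs()[2].name: token_type_ids,
}

latencies = []
for _ in range(max_iters):
    t0 = time.perf_counter()
    sess.run(None, feeds, run_options=run_opt)
    latencies.append((time.perf_counter() - t0) * 1000)  # ms

latencies.sort()
print("p50 = {:.3f} ms, p99 = {:.3f} ms".format(
    latencies[len(latencies) // 2], latencies[int(len(latencies) * 0.99)]
))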
"""simple docstring""" def lowerCamelCase (a_ :Tuple , a_ :int , a_ :Tuple , a_ :List[Any]) -> str: if height >= 1: move_tower(height - 1 , a_ , a_ , a_) move_disk(a_ , a_) move_tower(height - 1 , a_ , a_ , a_) def lowerCamelCase (a_ :int , a_ :Union[str, Any]) -> str: print('''moving disk from''' , a_ , '''to''' , a_) def lowerCamelCase () -> Tuple: lowercase :int = int(input('''Height of hanoi: ''').strip()) move_tower(a_ , '''A''' , '''B''' , '''C''') if __name__ == "__main__": main()
"""simple docstring""" def lowerCamelCase__ ( __snake_case ) -> str: """simple docstring""" return " ".join( ''''''.join(word[::-1] ) if len(a_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words("""Hey wollef sroirraw"""))
"""simple docstring""" from sklearn.metrics import mean_squared_error import datasets UpperCAmelCase = '''\ @article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011} } ''' UpperCAmelCase = '''\ Mean Squared Error(MSE) is the average of the square of difference between the predicted and actual values. ''' UpperCAmelCase = ''' Args: predictions: array-like of shape (n_samples,) or (n_samples, n_outputs) Estimated target values. references: array-like of shape (n_samples,) or (n_samples, n_outputs) Ground truth (correct) target values. sample_weight: array-like of shape (n_samples,), default=None Sample weights. multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average" Defines aggregating of multiple output values. Array-like value defines weights used to average errors. "raw_values" : Returns a full set of errors in case of multioutput input. "uniform_average" : Errors of all outputs are averaged with uniform weight. squared : bool, default=True If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value. Returns: mse : mean squared error. Examples: >>> mse_metric = datasets.load_metric("mse") >>> predictions = [2.5, 0.0, 2, 8] >>> references = [3, -0.5, 2, 7] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.375} >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False) >>> print(rmse_result) {\'mse\': 0.6123724356957945} If you\'re using multi-dimensional lists, then set the config as follows : >>> mse_metric = datasets.load_metric("mse", "multilist") >>> predictions = [[0.5, 1], [-1, 1], [7, -6]] >>> references = [[0, 2], [-1, 2], [8, -5]] >>> results = mse_metric.compute(predictions=predictions, references=references) >>> print(results) {\'mse\': 0.7083333333333334} >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\') >>> print(results) # doctest: +NORMALIZE_WHITESPACE {\'mse\': array([0.41666667, 1. 
])} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class __magic_name__ ( datasets.Metric ): def __snake_case ( self : int ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ '''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html''' ] , ) def __snake_case ( self : Optional[Any] ): '''simple docstring''' if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value('''float''' ) ), "references": datasets.Sequence(datasets.Value('''float''' ) ), } else: return { "predictions": datasets.Value('''float''' ), "references": datasets.Value('''float''' ), } def __snake_case ( self : List[Any] , snake_case__ : str , snake_case__ : int , snake_case__ : str=None , snake_case__ : List[Any]="uniform_average" , snake_case__ : Dict=True ): '''simple docstring''' lowercase :Dict = mean_squared_error( snake_case__ , snake_case__ , sample_weight=snake_case__ , multioutput=snake_case__ , squared=snake_case__ ) return {"mse": mse}
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCAmelCase (metaclass=__UpperCAmelCase ): """simple docstring""" _UpperCAmelCase :str = ["torch", "torchsde"] def __init__( self , *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def _snake_case ( cls , *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def _snake_case ( cls , *_UpperCAmelCase , **_UpperCAmelCase ): requires_backends(cls , ['''torch''', '''torchsde'''] )
"""simple docstring""" from abc import ABC, abstractmethod from argparse import ArgumentParser class __magic_name__ ( __UpperCAmelCase ): @staticmethod @abstractmethod def __snake_case ( snake_case__ : ArgumentParser ): '''simple docstring''' raise NotImplementedError() @abstractmethod def __snake_case ( self : Optional[Any] ): '''simple docstring''' raise NotImplementedError()
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider the rest of the activities
    for j in range(n):
        # If this activity starts at or after the finish time of the
        # previously selected activity, select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
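A small variant that returns the selected indices instead of printing them, which is easier to test; it is the same greedy scan and, like the original, assumes the activities are already sorted by finish time:

def max_activities(start: list[int], finish: list[int]) -> list[int]:
    selected = [0]  # the first activity is always taken
    for j in range(1, len(finish)):
        if start[j] >= finish[selected[-1]]:
            selected.append(j)
    return selected

assert max_activities([1, 3, 0, 5, 8, 5], [2, 4, 6, 7, 9, 9]) == [0, 1, 3, 4]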
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) UpperCAmelCase = { '''configuration_encodec''': [ '''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''EncodecConfig''', ], '''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''', '''EncodecModel''', '''EncodecPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { "tensor(bool)": np.bool_, "tensor(int8)": np.inta, "tensor(uint8)": np.uinta, "tensor(int16)": np.intaa, "tensor(uint16)": np.uintaa, "tensor(int32)": np.intaa, "tensor(uint32)": np.uintaa, "tensor(int64)": np.intaa, "tensor(uint64)": np.uintaa, "tensor(float16)": np.floataa, "tensor(float)": np.floataa, "tensor(double)": np.floataa, } class lowerCAmelCase_ : """simple docstring""" def __init__( self :Union[str, Any] , lowerCamelCase__ :Optional[Any]=None , **lowerCamelCase__ :Dict ): logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" ) UpperCamelCase__ :Optional[int] = model UpperCamelCase__ :int = kwargs.get("""model_save_dir""" , snake_case__ ) UpperCamelCase__ :Dict = kwargs.get("""latest_model_name""" , snake_case__ ) def __call__( self :Optional[Any] , **lowerCamelCase__ :Union[str, Any] ): UpperCamelCase__ :Dict = {k: np.array(snake_case__ ) for k, v in kwargs.items()} return self.model.run(snake_case__ , snake_case__ ) @staticmethod def __a ( lowerCamelCase__ :Union[str, Path] , lowerCamelCase__ :Union[str, Any]=None , lowerCamelCase__ :Dict=None ): if provider is None: logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" ) UpperCamelCase__ :int = '''CPUExecutionProvider''' return ort.InferenceSession(snake_case__ , providers=[provider] , sess_options=snake_case__ ) def __a ( self :Dict , lowerCamelCase__ :Union[str, Path] , lowerCamelCase__ :Optional[str] = None , **lowerCamelCase__ :int ): UpperCamelCase__ :List[Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME UpperCamelCase__ :int = self.model_save_dir.joinpath(self.latest_model_name ) UpperCamelCase__ :int = Path(snake_case__ ).joinpath(snake_case__ ) try: shutil.copyfile(snake_case__ , snake_case__ ) except shutil.SameFileError: pass # copy external weights (for models >2GB) UpperCamelCase__ :Tuple = self.model_save_dir.joinpath(snake_case__ ) if src_path.exists(): UpperCamelCase__ :Optional[int] = Path(snake_case__ ).joinpath(snake_case__ ) try: shutil.copyfile(snake_case__ , snake_case__ ) except shutil.SameFileError: pass def __a ( self :Union[str, Any] , lowerCamelCase__ :Union[str, os.PathLike] , **lowerCamelCase__ :str , ): if os.path.isfile(snake_case__ ): logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" ) return os.makedirs(snake_case__ , exist_ok=snake_case__ ) # saving model weights/files self._save_pretrained(snake_case__ , **snake_case__ ) @classmethod def __a ( cls :Optional[Any] , lowerCamelCase__ :Union[str, Path] , lowerCamelCase__ :Optional[Union[bool, str, None]] = None , lowerCamelCase__ :Optional[Union[str, None]] = None , lowerCamelCase__ :bool = False , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional["ort.SessionOptions"] = None , **lowerCamelCase__ :Optional[Any] , ): UpperCamelCase__ :List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(snake_case__ ): UpperCamelCase__ :Dict = OnnxRuntimeModel.load_model( os.path.join(snake_case__ , snake_case__ ) , 
provider=snake_case__ , sess_options=snake_case__ ) UpperCamelCase__ :str = Path(snake_case__ ) # load model from hub else: # download model UpperCamelCase__ :Optional[Any] = hf_hub_download( repo_id=snake_case__ , filename=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , ) UpperCamelCase__ :str = Path(snake_case__ ).parent UpperCamelCase__ :str = Path(snake_case__ ).name UpperCamelCase__ :Tuple = OnnxRuntimeModel.load_model(snake_case__ , provider=snake_case__ , sess_options=snake_case__ ) return cls(model=snake_case__ , **snake_case__ ) @classmethod def __a ( cls :Tuple , lowerCamelCase__ :Union[str, Path] , lowerCamelCase__ :bool = True , lowerCamelCase__ :Optional[str] = None , lowerCamelCase__ :Optional[str] = None , **lowerCamelCase__ :Tuple , ): UpperCamelCase__ :List[str] = None if len(str(snake_case__ ).split("""@""" ) ) == 2: UpperCamelCase__ :List[Any] = model_id.split("""@""" ) return cls._from_pretrained( model_id=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , use_auth_token=snake_case__ , **snake_case__ , )
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig, is_flax_available from transformers.testing_utils import require_flax, slow from transformers.utils import cached_property, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __magic_name__ ( unittest.TestCase ): def __init__( self : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str]=3 , snake_case__ : int=3_2 , snake_case__ : int=3 , snake_case__ : str=1_0 , snake_case__ : str=[1_0, 2_0, 3_0, 4_0] , snake_case__ : int=[1, 1, 2, 1] , snake_case__ : List[Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[Any]="relu" , snake_case__ : Optional[int]=3 , snake_case__ : Optional[Any]=None , ): '''simple docstring''' lowercase :Union[str, Any] = parent lowercase :Optional[Any] = batch_size lowercase :Dict = image_size lowercase :Any = num_channels lowercase :List[str] = embeddings_size lowercase :Union[str, Any] = hidden_sizes lowercase :Any = depths lowercase :Dict = is_training lowercase :Any = use_labels lowercase :Any = hidden_act lowercase :List[str] = num_labels lowercase :List[Any] = scope lowercase :int = len(snake_case__ ) def __snake_case ( self : Any ): '''simple docstring''' lowercase :Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase :Union[str, Any] = self.get_config() return config, pixel_values def __snake_case ( self : Dict ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def __snake_case ( self : str , snake_case__ : Tuple , snake_case__ : List[Any] ): '''simple docstring''' lowercase :Any = FlaxRegNetModel(config=snake_case__ ) lowercase :str = model(snake_case__ ) # Output shape (b, c, h, w) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def __snake_case ( self : List[str] , snake_case__ : Optional[int] , snake_case__ : str ): '''simple docstring''' lowercase :Tuple = self.num_labels lowercase :str = FlaxRegNetForImageClassification(config=snake_case__ ) lowercase :Union[str, Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self : str ): '''simple docstring''' lowercase :int = self.prepare_config_and_inputs() lowercase , lowercase :Tuple = config_and_inputs lowercase :Union[str, Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class __magic_name__ ( __UpperCAmelCase , unittest.TestCase ): __A : List[Any] = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else () __A : str = False __A : Tuple = False __A : Dict = False def __snake_case ( self : Optional[Any] ): '''simple docstring''' lowercase :Dict = FlaxRegNetModelTester(self ) lowercase :Tuple = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def __snake_case ( self : Union[str, Any] ): '''simple 
docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __snake_case ( self : List[Any] ): '''simple docstring''' return def __snake_case ( self : str ): '''simple docstring''' lowercase :Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __snake_case ( self : List[str] ): '''simple docstring''' lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __snake_case ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __snake_case ( self : List[Any] ): '''simple docstring''' pass def __snake_case ( self : List[Any] ): '''simple docstring''' lowercase , lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase :Union[str, Any] = model_class(snake_case__ ) lowercase :int = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase :Tuple = [*signature.parameters.keys()] lowercase :Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , snake_case__ ) def __snake_case ( self : Tuple ): '''simple docstring''' def check_hidden_states_output(snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ): lowercase :int = model_class(snake_case__ ) lowercase :Tuple = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) lowercase :Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase :Dict = self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) lowercase , lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase :Optional[int] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase :str = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def __snake_case ( self : List[Any] ): '''simple docstring''' lowercase , lowercase :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase :Optional[Any] = self._prepare_for_class(snake_case__ , snake_case__ ) lowercase :List[Any] = model_class(snake_case__ ) @jax.jit def model_jitted(snake_case__ : str , **snake_case__ : Optional[int] ): return model(pixel_values=snake_case__ , **snake_case__ ) with self.subTest('''JIT Enabled''' ): lowercase :Optional[int] = model_jitted(**snake_case__ ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowercase :Optional[int] = model_jitted(**snake_case__ ).to_tuple() self.assertEqual(len(snake_case__ ) , len(snake_case__ ) ) for jitted_output, output in zip(snake_case__ , snake_case__ ): self.assertEqual(jitted_output.shape , 
output.shape ) def lowerCamelCase () -> Tuple: lowercase :Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''') return image @require_flax class __magic_name__ ( unittest.TestCase ): @cached_property def __snake_case ( self : int ): '''simple docstring''' return AutoImageProcessor.from_pretrained('''facebook/regnet-y-040''' ) if is_vision_available() else None @slow def __snake_case ( self : List[str] ): '''simple docstring''' lowercase :int = FlaxRegNetForImageClassification.from_pretrained('''facebook/regnet-y-040''' ) lowercase :Optional[Any] = self.default_image_processor lowercase :Dict = prepare_img() lowercase :Any = image_processor(images=snake_case__ , return_tensors='''np''' ) lowercase :List[str] = model(**snake_case__ ) # verify the logits lowercase :Any = (1, 1_0_0_0) self.assertEqual(outputs.logits.shape , snake_case__ ) lowercase :List[Any] = jnp.array([-0.41_80, -1.50_51, -3.48_36] ) self.assertTrue(jnp.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py lowercase_ = 'src/diffusers' # Matches is_xxx_available() lowercase_ = re.compile(r'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla lowercase_ = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') lowercase_ = '\n{0} = None\n' lowercase_ = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' lowercase_ = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def UpperCAmelCase ( _lowercase : Dict ) -> Optional[int]: """simple docstring""" lowerCAmelCase_ = _re_backend.findall(a_ ) if len(a_ ) == 0: return None return "_and_".join(a_ ) def UpperCAmelCase ( ) -> List[Any]: """simple docstring""" with open(os.path.join(a_ , '''__init__.py''' ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase_ = f.readlines() # Get to the point we do the actual imports for type checking lowerCAmelCase_ = 0 lowerCAmelCase_ = {} # Go through the end of the file while line_index < len(a_ ): # If the line contains is_backend_available, we grab all objects associated with the `else` block lowerCAmelCase_ = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith('''else:''' ): line_index += 1 line_index += 1 lowerCAmelCase_ = [] # Until we unindent, add backend objects to the list while line_index < len(a_ ) and len(lines[line_index] ) > 1: lowerCAmelCase_ = lines[line_index] lowerCAmelCase_ = _re_single_line_import.search(a_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(a_ ) > 0: lowerCAmelCase_ = objects else: line_index += 1 return backend_specific_objects def UpperCAmelCase ( _lowercase : Tuple , _lowercase : Any ) -> Union[str, Any]: """simple docstring""" if name.isupper(): return DUMMY_CONSTANT.format(a_ ) elif name.islower(): return DUMMY_FUNCTION.format(a_ , a_ ) else: return DUMMY_CLASS.format(a_ , a_ ) def UpperCAmelCase ( _lowercase : Optional[Any]=None ) -> int: """simple docstring""" if backend_specific_objects is None: lowerCAmelCase_ = read_init() # For special correspondence backend to module name as used in the function requires_modulename lowerCAmelCase_ = {} for backend, objects in backend_specific_objects.items(): lowerCAmelCase_ = '''[''' + ''', '''.join(F"""\"{b}\"""" for b in backend.split('''_and_''' ) ) + ''']''' lowerCAmelCase_ = '''# This file is autogenerated by the command `make fix-copies`, do not edit.\n''' dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(a_ , a_ ) for o in objects] ) lowerCAmelCase_ = dummy_file return dummy_files def UpperCAmelCase ( _lowercase : Union[str, Any]=False ) -> int: """simple docstring""" lowerCAmelCase_ = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py lowerCAmelCase_ = {'''torch''': '''pt'''} # Locate actual dummy modules and read their content. 
lowerCAmelCase_ = os.path.join(a_ , '''utils''' ) lowerCAmelCase_ = { backend: os.path.join(a_ , F"""dummy_{short_names.get(a_ , a_ )}_objects.py""" ) for backend in dummy_files.keys() } lowerCAmelCase_ = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(a_ ): with open(a_ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase_ = f.read() else: lowerCAmelCase_ = '''''' for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( F"""Updating diffusers.utils.dummy_{short_names.get(a_ , a_ )}_objects.py as the main """ '''__init__ has new objects.''' ) with open(dummy_file_paths[backend] , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(dummy_files[backend] ) else: raise ValueError( '''The main __init__ has objects that are not present in ''' F"""diffusers.utils.dummy_{short_names.get(a_ , a_ )}_objects.py. Run `make fix-copies` """ '''to fix this.''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') lowercase_ = parser.parse_args() check_dummies(args.fix_and_overwrite)
"""simple docstring""" UpperCAmelCase = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } def lowerCamelCase (a_ :dict , a_ :List[str] , a_ :Tuple) -> list[str]: lowercase :str = set() # keep track of all the paths to be checked lowercase :Dict = [[start]] # return path if start is goal if start == goal: return [start] # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue lowercase :Optional[int] = queue.pop(0) # get the last node from the path lowercase :Any = path[-1] if node not in explored: lowercase :int = graph[node] # go through all neighbour nodes, construct a new path and # push it into the queue for neighbour in neighbours: lowercase :List[Any] = list(a_) new_path.append(a_) queue.append(a_) # return path if neighbour is goal if neighbour == goal: return new_path # mark node as explored explored.add(a_) # in case there's no path between the 2 nodes return [] def lowerCamelCase (a_ :dict , a_ :List[Any] , a_ :List[Any]) -> int: if not graph or start not in graph or target not in graph: return -1 if start == target: return 0 lowercase :List[str] = [start] lowercase :Optional[Any] = set(a_) # Keep tab on distances from `start` node. lowercase :Union[str, Any] = {start: 0, target: -1} while queue: lowercase :Union[str, Any] = queue.pop(0) if node == target: lowercase :Any = ( dist[node] if dist[target] == -1 else min(dist[target] , dist[node]) ) for adjacent in graph[node]: if adjacent not in visited: visited.add(a_) queue.append(a_) lowercase :Dict = dist[node] + 1 return dist[target] if __name__ == "__main__": print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D'] print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
'''simple docstring''' import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowercase_ ( __UpperCAmelCase ): """simple docstring""" def __init__( self : Optional[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any=1_3 , __lowerCamelCase : Tuple=7 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : int=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple=False , __lowerCamelCase : Dict=False , __lowerCamelCase : int=False , __lowerCamelCase : str=2 , __lowerCamelCase : int=9_9 , __lowerCamelCase : Optional[Any]=0 , __lowerCamelCase : int=3_2 , __lowerCamelCase : int=5 , __lowerCamelCase : Union[str, Any]=4 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Dict=5_1_2 , __lowerCamelCase : Optional[int]=1_2 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Tuple=0.0_2 , __lowerCamelCase : List[Any]=3 , __lowerCamelCase : Any=4 , __lowerCamelCase : Any="last" , __lowerCamelCase : Tuple=None , __lowerCamelCase : Union[str, Any]=None , ): """simple docstring""" _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_lengths _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = gelu_activation _SCREAMING_SNAKE_CASE = sinusoidal_embeddings _SCREAMING_SNAKE_CASE = causal _SCREAMING_SNAKE_CASE = asm _SCREAMING_SNAKE_CASE = n_langs _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = n_special _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = summary_type _SCREAMING_SNAKE_CASE = use_proj _SCREAMING_SNAKE_CASE = scope def lowerCAmelCase_ ( self : List[str] ): """simple docstring""" _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] ) _SCREAMING_SNAKE_CASE = None if self.use_input_lengths: _SCREAMING_SNAKE_CASE = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None 
_SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , 2 ).float() _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices ) _SCREAMING_SNAKE_CASE = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def lowerCAmelCase_ ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , ): """simple docstring""" _SCREAMING_SNAKE_CASE = FlaubertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() _SCREAMING_SNAKE_CASE = model(snake_case__ , lengths=snake_case__ , langs=snake_case__ ) _SCREAMING_SNAKE_CASE = model(snake_case__ , langs=snake_case__ ) _SCREAMING_SNAKE_CASE = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCAmelCase_ ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Union[str, Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE = FlaubertWithLMHeadModel(snake_case__ ) model.to(snake_case__ ) model.eval() _SCREAMING_SNAKE_CASE = model(snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase_ ( self : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Dict , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , ): """simple docstring""" _SCREAMING_SNAKE_CASE = FlaubertForQuestionAnsweringSimple(snake_case__ ) model.to(snake_case__ ) model.eval() _SCREAMING_SNAKE_CASE = model(snake_case__ ) _SCREAMING_SNAKE_CASE = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase_ ( self : str , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , 
__lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE = FlaubertForQuestionAnswering(snake_case__ ) model.to(snake_case__ ) model.eval() _SCREAMING_SNAKE_CASE = model(snake_case__ ) _SCREAMING_SNAKE_CASE = model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , p_mask=snake_case__ , ) _SCREAMING_SNAKE_CASE = model( snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , cls_index=snake_case__ , is_impossible=snake_case__ , ) (_SCREAMING_SNAKE_CASE ) = result_with_labels.to_tuple() _SCREAMING_SNAKE_CASE = model(snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ ) (_SCREAMING_SNAKE_CASE ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowerCAmelCase_ ( self : int , __lowerCamelCase : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Any , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , ): """simple docstring""" _SCREAMING_SNAKE_CASE = FlaubertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _SCREAMING_SNAKE_CASE = model(snake_case__ ) _SCREAMING_SNAKE_CASE = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCAmelCase_ ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : str , ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = FlaubertForTokenClassification(snake_case__ ) model.to(snake_case__ ) model.eval() _SCREAMING_SNAKE_CASE = model(snake_case__ , attention_mask=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase_ ( self : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Any , __lowerCamelCase : Dict , __lowerCamelCase : Tuple , ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.num_choices _SCREAMING_SNAKE_CASE = FlaubertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() _SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = 
input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() _SCREAMING_SNAKE_CASE = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase_ ( self : Dict ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( _SCREAMING_SNAKE_CASE ) = config_and_inputs _SCREAMING_SNAKE_CASE = { '''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths, '''attention_mask''': input_mask, } return config, inputs_dict @require_torch class lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): """simple docstring""" lowerCamelCase_ = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) lowerCamelCase_ = ( { "feature-extraction": FlaubertModel, "fill-mask": FlaubertWithLMHeadModel, "question-answering": FlaubertForQuestionAnsweringSimple, "text-classification": FlaubertForSequenceClassification, "token-classification": FlaubertForTokenClassification, "zero-shot": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowerCAmelCase_ ( self : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : int ): """simple docstring""" if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("Fast" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCAmelCase_ ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Dict , __lowerCamelCase : str=False ): """simple docstring""" _SCREAMING_SNAKE_CASE = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = FlaubertModelTester(self ) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=snake_case__ , emb_dim=3_7 ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" self.config_tester.run_common_tests() def lowerCAmelCase_ ( self : str ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*snake_case__ ) def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*snake_case__ ) def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*snake_case__ ) def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" 
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*snake_case__ ) def lowerCAmelCase_ ( self : Dict ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*snake_case__ ) def lowerCAmelCase_ ( self : List[Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*snake_case__ ) def lowerCAmelCase_ ( self : Optional[int] ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*snake_case__ ) @slow def lowerCAmelCase_ ( self : int ): """simple docstring""" for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = FlaubertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @slow @require_torch_gpu def lowerCAmelCase_ ( self : Union[str, Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return _SCREAMING_SNAKE_CASE = True _SCREAMING_SNAKE_CASE = model_class(config=snake_case__ ) _SCREAMING_SNAKE_CASE = self._prepare_for_class(snake_case__ , snake_case__ ) _SCREAMING_SNAKE_CASE = torch.jit.trace( snake_case__ , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(snake_case__ , os.path.join(snake_case__ , "traced_model.pt" ) ) _SCREAMING_SNAKE_CASE = torch.jit.load(os.path.join(snake_case__ , "traced_model.pt" ) , map_location=snake_case__ ) loaded(inputs_dict["input_ids"].to(snake_case__ ) , inputs_dict["attention_mask"].to(snake_case__ ) ) @require_torch class lowercase_ ( unittest.TestCase ): """simple docstring""" @slow def lowerCAmelCase_ ( self : Optional[Any] ): """simple docstring""" _SCREAMING_SNAKE_CASE = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" ) _SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(snake_case__ )[0] _SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) _SCREAMING_SNAKE_CASE = torch.tensor( [[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case__ , atol=1e-4 ) )
418
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser UpperCAmelCase = logging.getLogger(__name__) torch.set_grad_enabled(False) UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowerCamelCase (a_ :str , a_ :List[str]=100 , a_ :Optional[Any]=" ") -> List[str]: lowercase :str = text.split(a_) return [character.join(text[i : i + n]).strip() for i in range(0 , len(a_) , a_)] def lowerCamelCase (a_ :dict) -> dict: lowercase , lowercase :str = [], [] for title, text in zip(documents['''title'''] , documents['''text''']): if text is not None: for passage in split_text(a_): titles.append(title if title is not None else '''''') texts.append(a_) return {"title": titles, "text": texts} def lowerCamelCase (a_ :dict , a_ :DPRContextEncoder , a_ :DPRContextEncoderTokenizerFast) -> dict: lowercase :Tuple = ctx_tokenizer( documents['''title'''] , documents['''text'''] , truncation=a_ , padding='''longest''' , return_tensors='''pt''')['''input_ids'''] lowercase :Optional[Any] = ctx_encoder(input_ids.to(device=a_) , return_dict=a_).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase (a_ :"RagExampleArguments" , a_ :"ProcessingArguments" , a_ :"IndexHnswArguments" , ) -> Any: ###################################### logger.info('''Step 1 - Create the dataset''') ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" # You can load a Dataset object this way lowercase :List[Any] = load_dataset( '''csv''' , data_files=[rag_example_args.csv_path] , split='''train''' , delimiter='''\t''' , column_names=['''title''', '''text''']) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words lowercase :Optional[Any] = dataset.map(a_ , batched=a_ , num_proc=processing_args.num_proc) # And compute the embeddings lowercase :str = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=a_) lowercase :Dict = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) lowercase :str = Features( {'''text''': Value('''string'''), '''title''': Value('''string'''), '''embeddings''': Sequence(Value('''float32'''))}) # optional, save as float32 instead of float64 to save space lowercase :Optional[Any] = dataset.map( partial(a_ , ctx_encoder=a_ , ctx_tokenizer=a_) , batched=a_ , batch_size=processing_args.batch_size , features=a_ , ) # And finally save your dataset lowercase :str = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset''') dataset.save_to_disk(a_) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info('''Step 2 - Index the 
dataset''') ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search lowercase :str = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT) dataset.add_faiss_index('''embeddings''' , custom_index=a_) # And save the index lowercase :Optional[Any] = os.path.join(rag_example_args.output_dir , '''my_knowledge_dataset_hnsw_index.faiss''') dataset.get_index('''embeddings''').save(a_) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class __magic_name__ : __A : str = field( default=str(Path(__UpperCAmelCase ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) __A : Optional[str] = field( default=__UpperCAmelCase , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) __A : str = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) __A : str = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) __A : Optional[str] = field( default=str(Path(__UpperCAmelCase ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class __magic_name__ : __A : Optional[int] = field( default=__UpperCAmelCase , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) __A : int = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class __magic_name__ : __A : int = field( default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) __A : int = field( default=1_28 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) UpperCAmelCase = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: UpperCAmelCase = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
677
0
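Once the script above has saved the passages and the Faiss index, retrieval can be sanity-checked by embedding a question with the matching DPR question encoder and calling `get_nearest_examples`. A rough sketch, assuming the script's default output paths; the question-encoder checkpoint and `k` value are illustrative choices that mirror the multiset context encoder used above.

import torch
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

dataset = load_from_disk("test_run/dummy-kb/my_knowledge_dataset")
dataset.load_faiss_index("embeddings", "test_run/dummy-kb/my_knowledge_dataset_hnsw_index.faiss")

q_encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")

question = "What does Moses' rod turn into ?"
with torch.no_grad():
    question_emb = q_encoder(**q_tokenizer(question, return_tensors="pt")).pooler_output

# Retrieve the 3 passages whose DPR embeddings are nearest to the question embedding.
scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb.numpy()[0], k=3)
print(retrieved["title"])
print(retrieved["text"])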
'''simple docstring''' import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class a : '''simple docstring''' def __init__( self , lowerCamelCase_ , lowerCamelCase_=9_9 , lowerCamelCase_=1_3 , lowerCamelCase_=1_6 , lowerCamelCase_=7 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=True , lowerCamelCase_=2 , lowerCamelCase_=3_2 , lowerCamelCase_=4 , lowerCamelCase_=4 , lowerCamelCase_=3_0 , lowerCamelCase_=0 , lowerCamelCase_=1 , lowerCamelCase_=2 , lowerCamelCase_=None , ) -> Any: _a : List[str] = parent _a : Any = batch_size _a : Optional[Any] = decoder_seq_length # For common tests _a : List[Any] = self.decoder_seq_length _a : Dict = is_training _a : Tuple = use_attention_mask _a : Optional[int] = use_labels _a : Any = vocab_size _a : Optional[int] = d_model _a : Optional[Any] = d_model _a : Dict = decoder_layers _a : List[str] = decoder_layers _a : Any = decoder_ffn_dim _a : Union[str, Any] = decoder_attention_heads _a : Optional[int] = decoder_attention_heads _a : List[Any] = eos_token_id _a : Optional[Any] = bos_token_id _a : List[Any] = pad_token_id _a : Any = decoder_start_token_id _a : Tuple = use_cache _a : Any = max_position_embeddings _a : Any = None _a : int = decoder_seq_length _a : Optional[int] = 2 _a : Optional[int] = 1 def __UpperCamelCase ( self ) -> str: _a : str = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _a : Tuple = None if self.use_attention_mask: _a : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) _a : List[str] = None if self.use_labels: _a : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) _a : Any = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __UpperCamelCase ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]: _a : Union[str, Any] = True _a : str = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval() _a : Optional[Any] = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass _a : Any = model(snake_case__ , use_cache=snake_case__ ) _a : Any = model(snake_case__ ) _a : Union[str, Any] = model(snake_case__ , use_cache=snake_case__ ) self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) ) self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 ) _a : Tuple = outputs['''past_key_values'''] # create hypothetical next token and extent to next_input_ids _a : Optional[int] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and _a : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) _a : Optional[int] = 
model(snake_case__ )['''last_hidden_state'''] _a : Any = model(snake_case__ , past_key_values=snake_case__ )['''last_hidden_state'''] # select random slice _a : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() _a : Optional[Any] = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() _a : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) def __UpperCamelCase ( self ) -> Optional[Any]: _a : Optional[Any] = self.prepare_config_and_inputs() _a : Union[str, Any] = config_and_inputs _a : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask} return config, inputs_dict @require_torch class a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ): '''simple docstring''' __lowerCAmelCase : Any = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () __lowerCAmelCase : Optional[Any] = (TrOCRForCausalLM,) if is_torch_available() else () __lowerCAmelCase : List[Any] = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {} __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : Dict = False def __UpperCamelCase ( self ) -> List[Any]: _a : Tuple = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ ) _a : List[Any] = ConfigTester(self , config_class=snake_case__ ) def __UpperCamelCase ( self ) -> int: pass def __UpperCamelCase ( self ) -> Dict: pass def __UpperCamelCase ( self ) -> List[str]: pass def __UpperCamelCase ( self ) -> str: self.config_tester.run_common_tests() def __UpperCamelCase ( self ) -> int: _a : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*snake_case__ ) def __UpperCamelCase ( self ) -> Tuple: return @unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :) def __UpperCamelCase ( self ) -> Optional[Any]: pass
120
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available UpperCAmelCase = { '''configuration_longt5''': ['''LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LongT5Config''', '''LongT5OnnxConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LongT5EncoderModel''', '''LongT5ForConditionalGeneration''', '''LongT5Model''', '''LongT5PreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''FlaxLongT5ForConditionalGeneration''', '''FlaxLongT5Model''', '''FlaxLongT5PreTrainedModel''', ] if TYPE_CHECKING: from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longta import ( LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST, LongTaEncoderModel, LongTaForConditionalGeneration, LongTaModel, LongTaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_longta import ( FlaxLongTaForConditionalGeneration, FlaxLongTaModel, FlaxLongTaPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
677
0
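The `_LazyModule` indirection above means the heavy torch or flax backends are only imported when one of their symbols is first touched. A toy sketch of the mechanism follows; it is a simplification for illustration, not the actual `_LazyModule` implementation, and the relative import only resolves when the class lives inside a real package.

import importlib
import types


class LazyModule(types.ModuleType):
    """Maps attribute names to submodules and imports them on first access."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # e.g. {"configuration_longt5": ["LongT5Config"], ...}
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{module_name}", self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups are plain attribute hits
        return value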
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
0
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch UpperCAmelCase = logging.get_logger(__name__) class __magic_name__ : def __init__( self : Tuple , snake_case__ : str = None , snake_case__ : uuid.UUID = None , snake_case__ : Optional[int]=None , snake_case__ : Tuple=None ): '''simple docstring''' if not conversation_id: lowercase :List[Any] = uuid.uuida() if past_user_inputs is None: lowercase :Union[str, Any] = [] if generated_responses is None: lowercase :List[str] = [] lowercase :uuid.UUID = conversation_id lowercase :List[str] = past_user_inputs lowercase :List[str] = generated_responses lowercase :Optional[str] = text def __eq__( self : Optional[Any] , snake_case__ : str ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def __snake_case ( self : Optional[int] , snake_case__ : str , snake_case__ : bool = False ): '''simple docstring''' if self.new_user_input: if overwrite: logger.warning( f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """ f"""with: \"{text}\".""" ) lowercase :List[str] = text else: logger.warning( f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """ f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" ) else: lowercase :Optional[int] = text def __snake_case ( self : Any ): '''simple docstring''' if self.new_user_input: self.past_user_inputs.append(self.new_user_input ) lowercase :Tuple = None def __snake_case ( self : Tuple , snake_case__ : str ): '''simple docstring''' self.generated_responses.append(snake_case__ ) def __snake_case ( self : Tuple ): '''simple docstring''' for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self : Dict ): '''simple docstring''' lowercase :int = f"""Conversation id: {self.uuid} \n""" for is_user, text in self.iter_texts(): lowercase :Dict = '''user''' if is_user else '''bot''' output += f"""{name} >> {text} \n""" return output @add_end_docstrings( __UpperCAmelCase , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , ) class __magic_name__ ( __UpperCAmelCase ): def __init__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : List[Any] ): '''simple docstring''' super().__init__(*snake_case__ , **snake_case__ ) if self.tokenizer.pad_token_id is None: lowercase :Any = self.tokenizer.eos_token def __snake_case ( self : List[Any] , snake_case__ : Optional[int]=None , snake_case__ : Union[str, Any]=None , snake_case__ : List[str]=None , **snake_case__ : Union[str, Any] ): '''simple docstring''' lowercase :str = {} lowercase :List[str] = {} lowercase :Tuple = {} if min_length_for_response is not None: lowercase :Dict = min_length_for_response if 
minimum_tokens is not None: lowercase :Union[str, Any] = minimum_tokens if "max_length" in generate_kwargs: lowercase :List[Any] = generate_kwargs['''max_length'''] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: lowercase :Dict = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(snake_case__ ) return preprocess_params, forward_params, postprocess_params def __call__( self : List[Any] , snake_case__ : Union[Conversation, List[Conversation]] , snake_case__ : int=0 , **snake_case__ : int ): '''simple docstring''' lowercase :int = super().__call__(snake_case__ , num_workers=snake_case__ , **snake_case__ ) if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) == 1: return outputs[0] return outputs def __snake_case ( self : List[Any] , snake_case__ : Conversation , snake_case__ : Any=3_2 ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): raise ValueError('''ConversationalPipeline, expects Conversation as inputs''' ) if conversation.new_user_input is None: raise ValueError( f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """ '''Add user inputs with the conversation\'s `add_user_input` method''' ) if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ): lowercase :List[str] = self.tokenizer._build_conversation_input_ids(snake_case__ ) else: # If the tokenizer cannot handle conversations, we default to only the old version lowercase :List[str] = self._legacy_parse_and_tokenize(snake_case__ ) if self.framework == "pt": lowercase :int = torch.LongTensor([input_ids] ) elif self.framework == "tf": lowercase :Any = tf.constant([input_ids] ) return {"input_ids": input_ids, "conversation": conversation} def __snake_case ( self : int , snake_case__ : Union[str, Any] , snake_case__ : Any=1_0 , **snake_case__ : int ): '''simple docstring''' lowercase :Dict = generate_kwargs.get('''max_length''' , self.model.config.max_length ) lowercase :Optional[Any] = model_inputs['''input_ids'''].shape[1] if max_length - minimum_tokens < n: logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" ) lowercase :int = max_length - minimum_tokens lowercase :int = model_inputs['''input_ids'''][:, -trim:] if "attention_mask" in model_inputs: lowercase :int = model_inputs['''attention_mask'''][:, -trim:] lowercase :int = model_inputs.pop('''conversation''' ) lowercase :Union[str, Any] = max_length lowercase :Dict = self.model.generate(**snake_case__ , **snake_case__ ) if self.model.config.is_encoder_decoder: lowercase :List[Any] = 1 else: lowercase :Optional[Any] = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def __snake_case ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : Optional[Any]=True ): '''simple docstring''' lowercase :Dict = model_outputs['''output_ids'''] lowercase :Dict = self.tokenizer.decode( output_ids[0] , skip_special_tokens=snake_case__ , clean_up_tokenization_spaces=snake_case__ , ) lowercase :Optional[int] = model_outputs['''conversation'''] conversation.mark_processed() conversation.append_response(snake_case__ ) return conversation def __snake_case ( self : List[Any] , snake_case__ : Conversation ): '''simple docstring''' lowercase :str = self.tokenizer.eos_token_id lowercase :List[Any] = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: 
input_ids.extend(self.tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) + [eos_token_id] ) else: input_ids.extend(self.tokenizer.encode(snake_case__ , add_special_tokens=snake_case__ ) ) if len(snake_case__ ) > self.tokenizer.model_max_length: lowercase :List[Any] = input_ids[-self.tokenizer.model_max_length :] return input_ids
677
0
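Typical usage of the conversational pipeline defined above, sketched from its public surface; the checkpoint name is an illustrative choice, any conversational model works.

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-medium")

conversation = Conversation("What's the best way to learn Python?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

# Follow-up turns reuse the same Conversation object so the history is kept.
conversation.add_user_input("How long will that take?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])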
import argparse import gc import json import os import shutil import warnings import torch from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer try: from transformers import LlamaTokenizerFast except ImportError as e: warnings.warn(e) warnings.warn( "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion" ) snake_case = None snake_case = { "7B": 1_1008, "13B": 1_3824, "30B": 1_7920, "65B": 2_2016, "70B": 2_8672, } snake_case = { "7B": 1, "7Bf": 1, "13B": 2, "13Bf": 2, "30B": 4, "65B": 8, "70B": 8, "70Bf": 8, } def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__=1 , lowerCAmelCase__=2_56 ): """simple docstring""" return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of) def UpperCamelCase_ ( lowerCAmelCase__ ): """simple docstring""" with open(a_ , "r" ) as f: return json.load(a_ ) def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" with open(a_ , "w" ) as f: json.dump(a_ , a_ ) def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ): """simple docstring""" os.makedirs(a_ , exist_ok=a_ ) _lowerCAmelCase : Dict = os.path.join(a_ , "tmp" ) os.makedirs(a_ , exist_ok=a_ ) _lowerCAmelCase : int = read_json(os.path.join(a_ , "params.json" ) ) _lowerCAmelCase : Optional[Any] = NUM_SHARDS[model_size] _lowerCAmelCase : str = params['''n_layers'''] _lowerCAmelCase : Optional[int] = params['''n_heads'''] _lowerCAmelCase : int = n_heads // num_shards _lowerCAmelCase : str = params['''dim'''] _lowerCAmelCase : Union[str, Any] = dim // n_heads _lowerCAmelCase : int = 1_00_00.0 _lowerCAmelCase : Optional[int] = 1.0 / (base ** (torch.arange(0 , a_ , 2 ).float() / dims_per_head)) if "n_kv_heads" in params: _lowerCAmelCase : Union[str, Any] = params['''n_kv_heads'''] # for GQA / MQA _lowerCAmelCase : int = n_heads_per_shard // num_key_value_heads _lowerCAmelCase : Any = dim // num_key_value_heads else: # compatibility with other checkpoints _lowerCAmelCase : int = n_heads _lowerCAmelCase : Union[str, Any] = n_heads_per_shard _lowerCAmelCase : Union[str, Any] = dim # permute for sliced rotary def permute(lowerCAmelCase__ , lowerCAmelCase__=n_heads , lowerCAmelCase__=dim , lowerCAmelCase__=dim ): return w.view(a_ , dima // n_heads // 2 , 2 , a_ ).transpose(1 , 2 ).reshape(a_ , a_ ) print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" ) # Load weights if model_size == "7B": # Not sharded # (The sharded implementation would also work, but this is simpler.) 
_lowerCAmelCase : Optional[Any] = torch.load(os.path.join(a_ , "consolidated.00.pth" ) , map_location="cpu" ) else: # Sharded _lowerCAmelCase : Optional[Any] = [ torch.load(os.path.join(a_ , f"""consolidated.{i:02d}.pth""" ) , map_location="cpu" ) for i in range(a_ ) ] _lowerCAmelCase : List[str] = 0 _lowerCAmelCase : Any = {'''weight_map''': {}} for layer_i in range(a_ ): _lowerCAmelCase : str = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded _lowerCAmelCase : List[str] = { f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute( loaded[f"""layers.{layer_i}.attention.wq.weight"""] ), f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute( loaded[f"""layers.{layer_i}.attention.wk.weight"""] ), f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""], f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""], f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""], f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""], f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""], f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""], f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""], } else: # Sharded # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned. 
_lowerCAmelCase : int = { f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][ f"""layers.{layer_i}.attention_norm.weight""" ].clone(), f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][ f"""layers.{layer_i}.ffn_norm.weight""" ].clone(), } _lowerCAmelCase : Optional[Any] = permute( torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(a_ , a_ , a_ ) for i in range(a_ ) ] , dim=0 , ).reshape(a_ , a_ ) ) _lowerCAmelCase : Any = permute( torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view( a_ , a_ , a_ ) for i in range(a_ ) ] , dim=0 , ).reshape(a_ , a_ ) , a_ , a_ , a_ , ) _lowerCAmelCase : Optional[int] = torch.cat( [ loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view( a_ , a_ , a_ ) for i in range(a_ ) ] , dim=0 , ).reshape(a_ , a_ ) _lowerCAmelCase : Any = torch.cat( [loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(a_ )] , dim=1 ) _lowerCAmelCase : List[Any] = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(a_ )] , dim=0 ) _lowerCAmelCase : Any = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(a_ )] , dim=1 ) _lowerCAmelCase : str = torch.cat( [loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(a_ )] , dim=0 ) _lowerCAmelCase : Optional[Any] = inv_freq for k, v in state_dict.items(): _lowerCAmelCase : int = filename param_count += v.numel() torch.save(a_ , os.path.join(a_ , a_ ) ) _lowerCAmelCase : int = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin""" if model_size == "7B": # Unsharded _lowerCAmelCase : Union[str, Any] = { '''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''], '''model.norm.weight''': loaded['''norm.weight'''], '''lm_head.weight''': loaded['''output.weight'''], } else: _lowerCAmelCase : List[str] = { '''model.norm.weight''': loaded[0]['''norm.weight'''], '''model.embed_tokens.weight''': torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(a_ )] , dim=1 ), '''lm_head.weight''': torch.cat([loaded[i]["output.weight"] for i in range(a_ )] , dim=0 ), } for k, v in state_dict.items(): _lowerCAmelCase : Any = filename param_count += v.numel() torch.save(a_ , os.path.join(a_ , a_ ) ) # Write configs _lowerCAmelCase : Union[str, Any] = {'''total_size''': param_count * 2} write_json(a_ , os.path.join(a_ , "pytorch_model.bin.index.json" ) ) _lowerCAmelCase : str = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1 _lowerCAmelCase : str = params['''multiple_of'''] if '''multiple_of''' in params else 2_56 _lowerCAmelCase : str = LlamaConfig( hidden_size=a_ , intermediate_size=compute_intermediate_size(a_ , a_ , a_ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=a_ , ) config.save_pretrained(a_ ) # Make space so we can load the model properly now. del state_dict del loaded gc.collect() print("Loading the checkpoint in a Llama model." ) _lowerCAmelCase : Optional[Any] = LlamaForCausalLM.from_pretrained(a_ , torch_dtype=torch.floataa , low_cpu_mem_usage=a_ ) # Avoid saving this as part of the config. del model.config._name_or_path print("Saving in the Transformers format." 
) model.save_pretrained(a_ , safe_serialization=a_ ) shutil.rmtree(a_ ) def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ): """simple docstring""" _lowerCAmelCase : str = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" ) _lowerCAmelCase : int = tokenizer_class(a_ ) tokenizer.save_pretrained(a_ ) def UpperCamelCase_ ( ): """simple docstring""" _lowerCAmelCase : str = argparse.ArgumentParser() parser.add_argument( "--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , ) parser.add_argument( "--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , ) parser.add_argument( "--output_dir" , help="Location to write HF model and tokenizer" , ) parser.add_argument("--safe_serialization" , type=a_ , help="Whether or not to save using `safetensors`." ) _lowerCAmelCase : List[str] = parser.parse_args() if args.model_size != "tokenizer_only": write_model( model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , ) _lowerCAmelCase : List[str] = os.path.join(args.input_dir , "tokenizer.model" ) write_tokenizer(args.output_dir , a_ ) if __name__ == "__main__": main()
424
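The FFN sizing helper at the top of the conversion script implements LLaMA's rule: take 8·n/3, scale by `ffn_dim_multiplier`, then round up to a multiple of `multiple_of`. Reproduced here in a cleaned-up, self-contained form so the arithmetic can be checked against the size table in the script.

def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # Round int(ffn_dim_multiplier * 8n/3) up to the next multiple of `multiple_of`.
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


# LLaMA-7B: hidden size 4096 -> 8*4096/3 = 10922 -> rounded up to 11008,
# matching the "7B": 11008 entry in the first table of the script.
print(compute_intermediate_size(4096))  # 11008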
"""simple docstring""" def lowerCamelCase (a_ :int = 100) -> int: lowercase :Union[str, Any] = set() lowercase :List[Any] = 0 lowercase :Dict = n + 1 # maximum limit for a in range(2 , a_): for b in range(2 , a_): lowercase :Tuple = a**b # calculates the current power collect_powers.add(a_) # adds the result to the set return len(a_) if __name__ == "__main__": print('''Number of terms ''', solution(int(str(input()).strip())))
677
0
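A worked example of why the set matters: for n = 5 there are 16 raw terms a**b with 2 <= a, b <= 5, but 2**4 and 4**2 both equal 16, so the distinct count is 15. Running against the function above:

assert solution(5) == 15  # 16 raw terms, one collision: 2**4 == 4**2 == 16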
'''simple docstring''' import unittest from typing import Tuple import torch from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import require_torch @require_torch class _snake_case : @property def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' return self.get_dummy_input() @property def lowerCAmelCase__ ( self ) -> Any: '''simple docstring''' if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(F'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' ) def lowerCAmelCase__ ( self , a__=True , a__=False , a__=False , a__=False , ) -> Union[str, Any]: '''simple docstring''' snake_case_ = 4 snake_case_ = 32 snake_case_ = (32, 32) snake_case_ = torch.manual_seed(0 ) snake_case_ = torch.device(snake_case__ ) snake_case_ = (batch_size, num_channels) + sizes snake_case_ = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ ) snake_case_ = {'''hidden_states''': hidden_states} if include_temb: snake_case_ = 128 snake_case_ = randn_tensor((batch_size, temb_channels) , generator=snake_case__ , device=snake_case__ ) if include_res_hidden_states_tuple: snake_case_ = torch.manual_seed(1 ) snake_case_ = (randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ ),) if include_encoder_hidden_states: snake_case_ = floats_tensor((batch_size, 32, 32) ).to(snake_case__ ) if include_skip_sample: snake_case_ = randn_tensor(((batch_size, 3) + sizes) , generator=snake_case__ , device=snake_case__ ) return dummy_input def lowerCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' snake_case_ = { '''in_channels''': 32, '''out_channels''': 32, '''temb_channels''': 128, } if self.block_type == "up": snake_case_ = 32 if self.block_type == "mid": init_dict.pop("out_channels" ) snake_case_ = self.dummy_input return init_dict, inputs_dict def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]: '''simple docstring''' snake_case_ = self.prepare_init_args_and_inputs_for_common() snake_case_ = self.block_class(**snake_case__ ) unet_block.to(snake_case__ ) unet_block.eval() with torch.no_grad(): snake_case_ = unet_block(**snake_case__ ) if isinstance(snake_case__ , snake_case__ ): snake_case_ = output[0] self.assertEqual(output.shape , self.output_shape ) snake_case_ = output[0, -1, -3:, -3:] snake_case_ = torch.tensor(snake_case__ ).to(snake_case__ ) assert torch_all_close(output_slice.flatten() , snake_case__ , atol=5e-3 ) @unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" ) def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ = self.prepare_init_args_and_inputs_for_common() snake_case_ = self.block_class(**snake_case__ ) model.to(snake_case__ ) model.train() snake_case_ = model(**snake_case__ ) if isinstance(snake_case__ , snake_case__ ): snake_case_ = output[0] snake_case_ = torch.device(snake_case__ ) snake_case_ = randn_tensor(output.shape , device=snake_case__ ) snake_case_ = torch.nn.functional.mse_loss(snake_case__ , snake_case__ ) loss.backward()
400
"""simple docstring""" from typing import Callable, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { '''microsoft/xprophetnet-large-wiki100-cased''': ( '''https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json''' ), } class __magic_name__ ( __UpperCAmelCase ): __A : Optional[Any] = "xlm-prophetnet" __A : List[str] = ["past_key_values"] __A : int = { "num_attention_heads": "num_encoder_attention_heads", } def __init__( self : Any , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[Union[str, Callable]] = "gelu" , snake_case__ : Optional[int] = 3_0_5_2_2 , snake_case__ : Optional[int] = 1_0_2_4 , snake_case__ : Optional[int] = 4_0_9_6 , snake_case__ : Optional[int] = 1_2 , snake_case__ : Optional[int] = 1_6 , snake_case__ : Optional[int] = 4_0_9_6 , snake_case__ : Optional[int] = 1_2 , snake_case__ : Optional[int] = 1_6 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[float] = 0.1 , snake_case__ : Optional[int] = 5_1_2 , snake_case__ : Optional[float] = 0.02 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 2 , snake_case__ : Optional[int] = 3_2 , snake_case__ : Optional[int] = 1_2_8 , snake_case__ : Optional[bool] = False , snake_case__ : Optional[float] = 0.0 , snake_case__ : Optional[bool] = True , snake_case__ : Optional[int] = 0 , snake_case__ : Optional[int] = 1 , snake_case__ : Optional[int] = 2 , **snake_case__ : List[str] , ): '''simple docstring''' lowercase :Tuple = vocab_size lowercase :Optional[int] = hidden_size lowercase :Optional[int] = encoder_ffn_dim lowercase :Optional[int] = num_encoder_layers lowercase :Dict = num_encoder_attention_heads lowercase :List[str] = decoder_ffn_dim lowercase :Dict = num_decoder_layers lowercase :List[Any] = num_decoder_attention_heads lowercase :Optional[int] = max_position_embeddings lowercase :Tuple = init_std # Normal(0, this parameter) lowercase :int = activation_function # parameters for xlmprophetnet lowercase :Dict = ngram lowercase :Optional[Any] = num_buckets lowercase :Dict = relative_max_distance lowercase :List[Any] = disable_ngram_loss lowercase :Optional[Any] = eps # 3 Types of Dropout lowercase :Any = attention_dropout lowercase :List[str] = activation_dropout lowercase :List[str] = dropout lowercase :List[str] = use_cache super().__init__( pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , is_encoder_decoder=snake_case__ , add_cross_attention=snake_case__ , decoder_start_token_id=snake_case__ , **snake_case__ , ) @property def __snake_case ( self : Any ): '''simple docstring''' return self.num_encoder_layers + self.num_decoder_layers @num_hidden_layers.setter def __snake_case ( self : Optional[Any] , snake_case__ : Optional[Any] ): '''simple docstring''' raise NotImplementedError( '''This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and''' ''' `num_decoder_layers`.''' )
677
0
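The `attribute_map` above is what lets generic code read `config.num_attention_heads` even though this config stores the value as `num_encoder_attention_heads`. A short usage sketch, assuming the class is exported from transformers as `XLMProphetNetConfig`:

from transformers import XLMProphetNetConfig

config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
print(config.num_attention_heads)  # resolved through attribute_map -> num_encoder_attention_heads
print(config.num_hidden_layers)    # 12: the property above sums encoder + decoder layers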
"""Recursive 0/1 knapsack: choose items to maximise value under a weight limit."""


def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    # Option 1: skip the current item.
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it still fits.
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
263
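A small usage check for the recursion above: with weights [1, 2, 4, 5], values [5, 4, 8, 6] and capacity 5, the best choice is items 0 and 2 (weight 1 + 4, value 5 + 8 = 13).

weights = [1, 2, 4, 5]
values = [5, 4, 8, 6]
print(knapsack(weights, values, len(weights), 5, 0))  # 13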
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tensorflow_text_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = { '''configuration_bert''': ['''BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BertConfig''', '''BertOnnxConfig'''], '''tokenization_bert''': ['''BasicTokenizer''', '''BertTokenizer''', '''WordpieceTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ['''BertTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''BertForMaskedLM''', '''BertForMultipleChoice''', '''BertForNextSentencePrediction''', '''BertForPreTraining''', '''BertForQuestionAnswering''', '''BertForSequenceClassification''', '''BertForTokenClassification''', '''BertLayer''', '''BertLMHeadModel''', '''BertModel''', '''BertPreTrainedModel''', '''load_tf_weights_in_bert''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFBertEmbeddings''', '''TFBertForMaskedLM''', '''TFBertForMultipleChoice''', '''TFBertForNextSentencePrediction''', '''TFBertForPreTraining''', '''TFBertForQuestionAnswering''', '''TFBertForSequenceClassification''', '''TFBertForTokenClassification''', '''TFBertLMHeadModel''', '''TFBertMainLayer''', '''TFBertModel''', '''TFBertPreTrainedModel''', ] try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ['''TFBertTokenizer'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ '''FlaxBertForCausalLM''', '''FlaxBertForMaskedLM''', '''FlaxBertForMultipleChoice''', '''FlaxBertForNextSentencePrediction''', '''FlaxBertForPreTraining''', '''FlaxBertForQuestionAnswering''', '''FlaxBertForSequenceClassification''', '''FlaxBertForTokenClassification''', '''FlaxBertModel''', '''FlaxBertPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_fast import BertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, 
TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) try: if not is_tensorflow_text_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bert_tf import TFBertTokenizer try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_bert import ( FlaxBertForCausalLM, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, FlaxBertPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
677
0
"""simple docstring""" import os from datetime import datetime as dt from github import Github _a = [ """good first issue""", """good second issue""", """good difficult issue""", """enhancement""", """new pipeline/model""", """new scheduler""", """wip""", ] def lowerCamelCase__ ( ) -> Union[str, Any]: """simple docstring""" _UpperCamelCase = Github(os.environ['''GITHUB_TOKEN'''] ) _UpperCamelCase = g.get_repo('''huggingface/diffusers''' ) _UpperCamelCase = repo.get_issues(state='''open''' ) for issue in open_issues: _UpperCamelCase = sorted(issue.get_comments(), key=lambda __snake_case : i.created_at, reverse=a_ ) _UpperCamelCase = comments[0] if len(a_ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='''closed''' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='''open''' ) issue.remove_from_labels('''stale''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) issue.add_to_labels('''stale''' ) if __name__ == "__main__": main()
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __magic_name__ ( __UpperCAmelCase ): __A : Tuple = ["image_processor", "tokenizer"] __A : Dict = "BlipImageProcessor" __A : Dict = "AutoTokenizer" def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str ): '''simple docstring''' lowercase :Dict = False super().__init__(snake_case__ , snake_case__ ) lowercase :Union[str, Any] = self.image_processor def __call__( self : Optional[int] , snake_case__ : ImageInput = None , snake_case__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case__ : bool = True , snake_case__ : Union[bool, str, PaddingStrategy] = False , snake_case__ : Union[bool, str, TruncationStrategy] = None , snake_case__ : Optional[int] = None , snake_case__ : int = 0 , snake_case__ : Optional[int] = None , snake_case__ : Optional[bool] = None , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = False , snake_case__ : bool = True , snake_case__ : Optional[Union[str, TensorType]] = None , **snake_case__ : Optional[Any] , ): '''simple docstring''' if images is None and text is None: raise ValueError('''You have to specify either images or text.''' ) # Get only text if images is None: lowercase :List[Any] = self.tokenizer lowercase :str = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) return text_encoding # add pixel_values lowercase :Union[str, Any] = self.image_processor(snake_case__ , return_tensors=snake_case__ ) if text is not None: lowercase :int = self.tokenizer( text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , ) else: lowercase :Optional[int] = None if text_encoding is not None: encoding_image_processor.update(snake_case__ ) return encoding_image_processor def __snake_case ( self : Tuple , *snake_case__ : List[Any] , **snake_case__ : Tuple ): '''simple docstring''' return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ ) def __snake_case ( self : List[str] , *snake_case__ : Dict , **snake_case__ : List[Any] ): '''simple docstring''' return self.tokenizer.decode(*snake_case__ , **snake_case__ ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __snake_case ( self : List[Any] ): '''simple docstring''' lowercase :List[Any] = self.tokenizer.model_input_names lowercase :List[Any] = 
self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
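# A usage sketch for a processor of this shape (Blip image processor + auto
# tokenizer): images and text are merged into a single BatchEncoding. The
# checkpoint name is illustrative only.
import requests
from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")  # assumed checkpoint
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
inputs = processor(images=image, text="a photo of", return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']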
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import ResNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFResNetForImageClassification, TFResNetModel from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCAmelCase : """simple docstring""" def __init__( self , _UpperCAmelCase , _UpperCAmelCase=3 , _UpperCAmelCase=32 , _UpperCAmelCase=3 , _UpperCAmelCase=10 , _UpperCAmelCase=[10, 20, 30, 40] , _UpperCAmelCase=[1, 1, 2, 1] , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=3 , _UpperCAmelCase=None , ): lowercase__: Any = parent lowercase__: Dict = batch_size lowercase__: Any = image_size lowercase__: Optional[int] = num_channels lowercase__: int = embeddings_size lowercase__: str = hidden_sizes lowercase__: int = depths lowercase__: str = is_training lowercase__: Dict = use_labels lowercase__: Any = hidden_act lowercase__: Union[str, Any] = num_labels lowercase__: Union[str, Any] = scope lowercase__: Optional[int] = len(snake_case__ ) def _snake_case ( self ): lowercase__: int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase__: List[Any] = None if self.use_labels: lowercase__: Dict = ids_tensor([self.batch_size] , self.num_labels ) lowercase__: Tuple = self.get_config() return config, pixel_values, labels def _snake_case ( self ): return ResNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: Any = TFResNetModel(config=snake_case__ ) lowercase__: Dict = model(snake_case__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: int = self.num_labels lowercase__: Dict = TFResNetForImageClassification(snake_case__ ) lowercase__: Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _snake_case ( self ): lowercase__: List[Any] = self.prepare_config_and_inputs() lowercase__: List[Any] = config_and_inputs lowercase__: Optional[int] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class UpperCAmelCase (__UpperCAmelCase ,__UpperCAmelCase ,unittest.TestCase ): """simple docstring""" _UpperCAmelCase :Tuple = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else () _UpperCAmelCase :List[Any] = ( {"feature-extraction": TFResNetModel, "image-classification": TFResNetForImageClassification} if is_tf_available() else {} ) _UpperCAmelCase :Union[str, Any] = False _UpperCAmelCase :str = 
False _UpperCAmelCase :List[str] = False _UpperCAmelCase :str = False _UpperCAmelCase :str = False def _snake_case ( self ): lowercase__: int = TFResNetModelTester(self ) lowercase__: Optional[int] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def _snake_case ( self ): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _snake_case ( self ): return @unittest.skip(reason='''ResNet does not use inputs_embeds''' ) def _snake_case ( self ): pass @unittest.skip(reason='''ResNet does not support input and output embeddings''' ) def _snake_case ( self ): pass def _snake_case ( self ): lowercase__: Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__: Union[str, Any] = model_class(snake_case__ ) lowercase__: Dict = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__: str = [*signature.parameters.keys()] lowercase__: Optional[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , snake_case__ ) def _snake_case ( self ): lowercase__: Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def _snake_case ( self ): def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): lowercase__: int = model_class(snake_case__ ) lowercase__: Any = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) lowercase__: Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase__: List[Any] = self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) # ResNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) lowercase__: Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowercase__: Tuple = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowercase__: List[Any] = layer_type lowercase__: List[Any] = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__: str = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def _snake_case ( self ): lowercase__: Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def _snake_case ( self ): for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__: int = TFResNetModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( ) -> List[Any]: lowercase__: List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class UpperCAmelCase (unittest.TestCase ): """simple docstring""" @cached_property def _snake_case ( self ): return ( 
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def _snake_case ( self ): lowercase__: Union[str, Any] = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowercase__: Union[str, Any] = self.default_image_processor lowercase__: Dict = prepare_img() lowercase__: int = image_processor(images=snake_case__ , return_tensors='''tf''' ) # forward pass lowercase__: List[Any] = model(**snake_case__ ) # verify the logits lowercase__: Tuple = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , snake_case__ ) lowercase__: str = tf.constant([-11.1_069, -9.7_877, -8.3_777] ) self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , snake_case__ , atol=1e-4 ) )
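# The integration check above, rewritten as a standalone sketch. It assumes the
# TF and vision extras are installed; "microsoft/resnet-50" is the first entry
# of the pretrained archive list at the time of writing.
import tensorflow as tf
from PIL import Image
from transformers import AutoImageProcessor, TFResNetForImageClassification

image_processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
model = TFResNetForImageClassification.from_pretrained("microsoft/resnet-50")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000)
print(model.config.id2label[int(tf.argmax(logits, axis=-1)[0])])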
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class __magic_name__ ( __UpperCAmelCase ): @require_torch def __snake_case ( self : Dict ): '''simple docstring''' lowercase :Optional[Any] = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' lowercase :Any = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' lowercase :Tuple = ''' import socket def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache lowercase :str = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(snake_case__ ) BertModel.from_pretrained(snake_case__ ) BertTokenizer.from_pretrained(snake_case__ ) pipeline(task='''fill-mask''' , model=snake_case__ ) # baseline - just load from_pretrained with normal network lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed lowercase :Any = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowercase :List[Any] = '''1''' lowercase :List[str] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def __snake_case ( self : Tuple ): '''simple docstring''' lowercase :List[str] = ''' from transformers import BertConfig, BertModel, BertTokenizer, pipeline ''' lowercase :Dict = ''' mname = "hf-internal-testing/tiny-random-bert" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task="fill-mask", model=mname) print("success") ''' lowercase :List[Any] = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet") socket.socket = offline_socket ''' # Force fetching the files so that we can use the cache lowercase :str = '''hf-internal-testing/tiny-random-bert''' BertConfig.from_pretrained(snake_case__ ) BertModel.from_pretrained(snake_case__ ) BertTokenizer.from_pretrained(snake_case__ ) pipeline(task='''fill-mask''' , model=snake_case__ ) # baseline - just load from_pretrained with normal network lowercase :List[str] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )] # should succeed lowercase :str = self.get_env() lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def __snake_case ( self : int ): '''simple docstring''' lowercase :str = ''' from transformers import BertConfig, BertModel, BertTokenizer ''' lowercase :Union[str, Any] = ''' mname = "hf-internal-testing/tiny-random-bert-sharded" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print("success") ''' lowercase :Optional[int] = ''' import socket def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled") socket.socket = offline_socket ''' # baseline - just load from_pretrained with normal network lowercase :Optional[Any] = 
[sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed lowercase :Union[str, Any] = self.get_env() lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # next emulate no network lowercase :Tuple = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowercase :Any = '''1''' lowercase :Optional[Any] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) @require_torch def __snake_case ( self : Optional[int] ): '''simple docstring''' lowercase :Dict = ''' from transformers import pipeline ''' lowercase :Optional[Any] = ''' mname = "hf-internal-testing/tiny-random-bert" pipe = pipeline(model=mname) ''' lowercase :Dict = ''' import socket def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled") socket.socket = offline_socket ''' lowercase :Tuple = self.get_env() lowercase :Optional[Any] = '''1''' lowercase :Optional[int] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )] lowercase :str = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , ) @require_torch def __snake_case ( self : Optional[Any] ): '''simple docstring''' lowercase :List[Any] = ''' from transformers import AutoModel ''' lowercase :Union[str, Any] = ''' mname = "hf-internal-testing/test_dynamic_model" AutoModel.from_pretrained(mname, trust_remote_code=True) print("success") ''' # baseline - just load from_pretrained with normal network lowercase :Union[str, Any] = [sys.executable, '''-c''', '''\n'''.join([load, run] )] # should succeed lowercase :List[str] = self.get_env() lowercase :Optional[int] = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files lowercase :List[Any] = '''1''' lowercase :Tuple = subprocess.run(snake_case__ , env=snake_case__ , check=snake_case__ , capture_output=snake_case__ ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn('''success''' , result.stdout.decode() )
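# The pattern these offline tests exercise, boiled down: warm the cache online,
# then re-load in a child process with TRANSFORMERS_OFFLINE=1 so no network
# access happens. A sketch, assuming the tiny checkpoint is already cached.
import os
import subprocess
import sys

script = (
    "from transformers import BertConfig; "
    "BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert'); "
    "print('success')"
)
env = {**os.environ, "TRANSFORMERS_OFFLINE": "1"}
result = subprocess.run([sys.executable, "-c", script], env=env, capture_output=True)
print(result.returncode, result.stdout.decode())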
from __future__ import annotations

import os
from collections.abc import Mapping

EdgeT = tuple[int, int]


class Graph:
    """An undirected, weighted graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        # Normalize every edge so the smaller vertex comes first.
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph, registering both endpoints as vertices."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm and return a minimum spanning tree of the graph."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            # Start above the heaviest edge so any crossing edge beats it.
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint already inside the subgraph.
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Return the total saving from replacing the network with its minimum spanning tree."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge_a: int
    edge_b: int

    with open(filepath) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]

    # Only read the lower triangle; the matrix is symmetric.
    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{solution() = }")
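# A tiny worked example of the Graph API above: a weighted triangle whose MST
# keeps the edges of weight 2 and 3 and drops the weight-5 edge, saving 5.
triangle = Graph({0, 1, 2}, {(0, 1): 2, (1, 2): 3, (0, 2): 5})
mst = triangle.prims_algorithm()
print(sum(triangle.edges.values()) - sum(mst.edges.values()))  # 5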
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_CAUSAL_LM_MAPPING A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __magic_name__( self ): lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id lowerCAmelCase__ : List[Any] = '''<pad>''' lowerCAmelCase__ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) 
return text_generator, ["This is a test", "Another test"] def __magic_name__( self ): lowerCAmelCase__ : Any = '''Hello I believe in''' lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = text_generator.model lowerCAmelCase__ : Optional[int] = text_generator.tokenizer lowerCAmelCase__ : Tuple = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase__ : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase__ : str = text_generator('''''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase__ : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch # Classic `model_kwargs` lowerCAmelCase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 ) def __magic_name__( self ): lowerCAmelCase__ : int = '''Hello world''' lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(__UpperCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCAmelCase , cl.out ) with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 ) self.assertNotIn(__UpperCAmelCase , cl.out )
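# A minimal sketch of the behaviour these tests pin down, using the same tiny
# checkpoint; with do_sample=False the output is deterministic.
from transformers import pipeline

text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
outputs = text_generator("Hello I believe in", do_sample=False, max_new_tokens=5)
print(outputs[0]["generated_text"])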
#
# This is a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes        # name
# #SBATCH --nodes=2                    # nodes
# #SBATCH --ntasks-per-node=1          # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10           # number of cores per tasks
# #SBATCH --gres=gpu:4                 # number of gpus
# #SBATCH --time 0:05:00               # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out           # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
#  --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
#  --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
#  torch-distributed-gpu-test.py'
#

import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*args):
    """Print serialized across processes by taking an exclusive lock on this script's file."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*args)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
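# Round-trip sketch for the two helpers above.
encoded = base16_encode(b"Hello World!")
print(encoded)                 # 48656C6C6F20576F726C6421
print(base16_decode(encoded))  # b'Hello World!'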
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder lowerCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCAmelCase_ = 2_56 class _lowerCAmelCase ( _lowercase ): A__ = ['melgan'] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ): super().__init__() # From MELGAN lowerCAmelCase__ : Dict = math.log(1e-5 ) # Matches MelGAN training. lowerCAmelCase__ : Tuple = 4.0 # Largest value for most examples lowerCAmelCase__ : int = 128 self.register_modules( notes_encoder=__UpperCAmelCase , continuous_encoder=__UpperCAmelCase , decoder=__UpperCAmelCase , scheduler=__UpperCAmelCase , melgan=__UpperCAmelCase , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=(-1.0, 1.0) , __UpperCAmelCase=False ): lowerCAmelCase__ , lowerCAmelCase__ : Dict = output_range if clip: lowerCAmelCase__ : List[str] = torch.clip(__UpperCAmelCase , self.min_value , self.max_value ) # Scale to [0, 1]. lowerCAmelCase__ : Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=(-1.0, 1.0) , __UpperCAmelCase=False ): lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = input_range lowerCAmelCase__ : int = torch.clip(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if clip else outputs # Scale to [0, 1]. lowerCAmelCase__ : Any = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : List[str] = input_tokens > 0 lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.notes_encoder( encoder_input_tokens=__UpperCAmelCase , encoder_inputs_mask=__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.continuous_encoder( encoder_inputs=__UpperCAmelCase , encoder_inputs_mask=__UpperCAmelCase ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = noise_time if not torch.is_tensor(__UpperCAmelCase ): lowerCAmelCase__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(__UpperCAmelCase ) and len(timesteps.shape ) == 0: lowerCAmelCase__ : Optional[int] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowerCAmelCase__ : Dict = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowerCAmelCase__ : Optional[Any] = self.decoder( encodings_and_masks=__UpperCAmelCase , decoder_input_tokens=__UpperCAmelCase , decoder_noise_time=__UpperCAmelCase ) return logits @torch.no_grad() def __call__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = 100 , __UpperCAmelCase = True , __UpperCAmelCase = "numpy" , __UpperCAmelCase = None , __UpperCAmelCase = 1 , ): if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__UpperCAmelCase )}.""" ) lowerCAmelCase__ : Optional[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowerCAmelCase__ : Any = np.zeros([1, 0, self.n_dims] , np.floataa ) lowerCAmelCase__ : Optional[int] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__UpperCAmelCase , device=self.device ) for i, encoder_input_tokens in enumerate(__UpperCAmelCase ): if i == 0: lowerCAmelCase__ : str = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowerCAmelCase__ : Optional[int] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__UpperCAmelCase , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowerCAmelCase__ : Optional[Any] = ones lowerCAmelCase__ : Dict = self.scale_features( __UpperCAmelCase , output_range=[-1.0, 1.0] , clip=__UpperCAmelCase ) lowerCAmelCase__ : List[str] = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__UpperCAmelCase , continuous_mask=__UpperCAmelCase , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowerCAmelCase__ : Optional[int] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=__UpperCAmelCase , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(__UpperCAmelCase ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowerCAmelCase__ : Tuple = self.decode( encodings_and_masks=__UpperCAmelCase , input_tokens=__UpperCAmelCase , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowerCAmelCase__ : int = self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase ).prev_sample lowerCAmelCase__ : Any = self.scale_to_features(__UpperCAmelCase , input_range=[-1.0, 1.0] ) lowerCAmelCase__ : int = mel[:1] lowerCAmelCase__ : Optional[int] = mel.cpu().float().numpy() lowerCAmelCase__ : int = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCAmelCase , __UpperCAmelCase ) logger.info('''Generated segment''' , __UpperCAmelCase ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' ) if output_type == "numpy": lowerCAmelCase__ : Tuple = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowerCAmelCase__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=__UpperCAmelCase )
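# A hedged usage sketch for the pipeline defined above; the checkpoint name,
# the MidiProcessor preprocessing step, and the local MIDI filename are all
# assumptions (the file itself only defines the denoising pipeline).
import scipy.io.wavfile
from diffusers import MidiProcessor, SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
processor = MidiProcessor()  # requires the note_seq extra
output = pipe(processor("beethoven_hammerklavier_2.mid"))  # hypothetical local MIDI file
scipy.io.wavfile.write("output.wav", rate=16000, data=output.audios[0])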
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class _lowerCAmelCase ( _lowercase ): A__ = (DPMSolverSDEScheduler,) A__ = 10 def __magic_name__( self , **__UpperCAmelCase ): lowerCAmelCase__ : Dict = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**__UpperCAmelCase ) return config def __magic_name__( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def __magic_name__( self ): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def __magic_name__( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def __magic_name__( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = self.scheduler_classes[0] lowerCAmelCase__ : str = self.get_scheduler_config() lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Union[str, Any] = self.dummy_model() lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = output.prev_sample lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Optional[Any] = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = output.prev_sample lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) 
) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config() lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = output.prev_sample lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0] lowerCAmelCase__ : Dict = self.get_scheduler_config() lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self.dummy_model() lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase ) for t in scheduler.timesteps: lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Dict = output.prev_sample lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
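# The denoising-loop shape these tests repeat, as a standalone sketch with a
# random stand-in for the UNet (DPMSolverSDEScheduler needs the torchsde extra).
import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.randn_like(model_input)  # stand-in for a real model call
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.shape)  # torch.Size([1, 3, 8, 8])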
import datasets


_CITATION = """\
@InProceedings{conneau2018xnli,
  author = "Conneau, Alexis and Rinott, Ruty and Lample, Guillaume and Williams, Adina and Bowman, Samuel R. and Schwenk, Holger and Stoyanov, Veselin",
  title = "XNLI: Evaluating Cross-lingual Sentence Representations",
  booktitle = "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  location = "Brussels, Belgium",
}
"""

_DESCRIPTION = """\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
"""

_KWARGS_DESCRIPTION = """
Computes XNLI score which is just simple accuracy.
Args:
    predictions: Predicted labels.
    references: Ground truth labels.
Returns:
    'accuracy': accuracy
Examples:

    >>> predictions = [0, 1]
    >>> references = [0, 1]
    >>> xnli_metric = datasets.load_metric("xnli")
    >>> results = xnli_metric.compute(predictions=predictions, references=references)
    >>> print(results)
    {'accuracy': 1.0}
"""


def simple_accuracy(preds, labels):
    """Fraction of predictions that exactly match the reference labels."""
    return (preds == labels).mean()


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "sts-b" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
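# How these criteria plug into generation in practice, sketched with the same
# tiny checkpoint family used elsewhere in the suite.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("Hello", return_tensors="pt")
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8)])
output_ids = model.generate(**inputs, stopping_criteria=criteria, do_sample=False)
print(output_ids.shape[-1])  # never exceeds 8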
import argparse
import json
from pathlib import Path

import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download

from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")

    return name


def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # fused qkv projections are split into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict


def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="ast-finetuned-audioset-10-10-0.4593",
        type=str,
        help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the 1000-digit number n."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
from __future__ import annotations

from fractions import Fraction
from math import gcd, sqrt


def is_sq(number: int) -> bool:
    """Check if number is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int) -> tuple[int, int]:
    """Return the sum of three fractions as a (numerator, denominator) pair in lowest terms."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
import re

import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey

from ..utils import logging


logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    """Rename PyTorch weight names to the corresponding Flax names and reshape the tensor if necessary."""
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
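

# A minimal usage sketch (not from the original file; `flax_model` and `pt_model` are
# hypothetical stand-ins for a Flax module exposing `init_weights` and a PyTorch module
# whose weights are being ported):
#
#   pt_state_dict = pt_model.state_dict()
#   flax_params = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)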
import inspect
import unittest

from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import ViTMSNForImageClassification, ViTMSNModel
    from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class ViTMSNModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMSNConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMSNModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        print(f"Pixel and labels shape: {pixel_values.shape}, {labels.shape}")
        print(f"Labels: {labels}")
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTMSNForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class ViTMSNModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMSNModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMSNConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMSN does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMSNModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTMSNModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-msn-small") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        torch.manual_seed(2)

        model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0803, -0.4454, -0.2375]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nezha"] = [
        "NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NezhaForNextSentencePrediction",
        "NezhaForMaskedLM",
        "NezhaForPreTraining",
        "NezhaForMultipleChoice",
        "NezhaForQuestionAnswering",
        "NezhaForSequenceClassification",
        "NezhaForTokenClassification",
        "NezhaModel",
        "NezhaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nezha import (
            NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
            NezhaModel,
            NezhaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations

from scipy.special import comb  # type: ignore


class BezierCurve:
    def __init__(self, list_of_points: list[tuple[float, float]]):
        # Control points in the xy plane; they determine the shape of the curve.
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i))
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(
            to_plot_x,
            to_plot_y,
            color="blue",
            label="Curve of Degree " + str(self.degree),
        )
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    BezierCurve([(1, 2), (3, 5)]).plot_curve()  # degree 1
    BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve()  # degree 2
    BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve()  # degree 3
import tempfile
import unittest

import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError

from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax


if is_flax_available():
    import os

    from flax.core.frozen_dict import unfreeze
    from flax.traverse_util import flatten_dict

    from transformers import FlaxBertModel

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8


@require_flax
@is_staging_test
class FlaxModelPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-model-flax")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("test-model-flax", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="test-model-flax")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
        )
        model = FlaxBertModel(config)
        model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token)

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token
            )

        new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org")

        base_params = flatten_dict(unfreeze(model.params))
        new_params = flatten_dict(unfreeze(new_model.params))

        for key in base_params.keys():
            max_diff = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical")


def check_models_equal(model1, model2):
    models_are_equal = True
    flat_params_1 = flatten_dict(model1.params)
    flat_params_2 = flatten_dict(model2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False

    return models_are_equal


@require_flax
class FlaxModelUtilsTest(unittest.TestCase):
    def test_model_from_pretrained_subfolder(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder))

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_subfolder_sharded(self):
        config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only")
        model = FlaxBertModel(config)

        subfolder = "bert"
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB")

            with self.assertRaises(OSError):
                _ = FlaxBertModel.from_pretrained(tmp_dir)

            model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder)

        self.assertTrue(check_models_equal(model, model_loaded))

    def test_model_from_pretrained_hub_subfolder(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)

    def test_model_from_pretrained_hub_subfolder_sharded(self):
        subfolder = "bert"
        model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder"

        with self.assertRaises(OSError):
            _ = FlaxBertModel.from_pretrained(model_id)

        model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder)

        self.assertIsNotNone(model)
import os
import unittest
from tempfile import TemporaryDirectory

import torch
import torch.nn as nn

from accelerate.utils import (
    OffloadedWeightsLoader,
    extract_submodules_state_dict,
    load_offloaded_weight,
    offload_state_dict,
    offload_weight,
)


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded

    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))

    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        # choose a random pivot and move it to the end
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class _lowerCAmelCase : def __init__( self , __UpperCAmelCase = None ): if components is None: lowerCAmelCase__ : int = [] lowerCAmelCase__ : Union[str, Any] = list(__UpperCAmelCase ) def __len__( self ): return len(self.__components ) def __str__( self ): return "(" + ",".join(map(__UpperCAmelCase , self.__components ) ) + ")" def __add__( self , __UpperCAmelCase ): lowerCAmelCase__ : int = len(self ) if size == len(__UpperCAmelCase ): lowerCAmelCase__ : List[str] = [self.__components[i] + other.component(__UpperCAmelCase ) for i in range(__UpperCAmelCase )] return Vector(__UpperCAmelCase ) else: raise Exception('''must have the same size''' ) def __sub__( self , __UpperCAmelCase ): lowerCAmelCase__ : Dict = len(self ) if size == len(__UpperCAmelCase ): lowerCAmelCase__ : Tuple = [self.__components[i] - other.component(__UpperCAmelCase ) for i in range(__UpperCAmelCase )] return Vector(__UpperCAmelCase ) else: # error case raise Exception('''must have the same size''' ) @overload def __mul__( self , __UpperCAmelCase ): ... @overload def __mul__( self , __UpperCAmelCase ): ... def __mul__( self , __UpperCAmelCase ): if isinstance(__UpperCAmelCase , (float, int) ): lowerCAmelCase__ : Optional[int] = [c * other for c in self.__components] return Vector(__UpperCAmelCase ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ) and len(self ) == len(__UpperCAmelCase ): lowerCAmelCase__ : str = len(self ) lowerCAmelCase__ : Any = [self.__components[i] * other.component(__UpperCAmelCase ) for i in range(__UpperCAmelCase )] return sum(__UpperCAmelCase ) else: # error case raise Exception('''invalid operand!''' ) def __magic_name__( self ): return Vector(self.__components ) def __magic_name__( self , __UpperCAmelCase ): if isinstance(__UpperCAmelCase , __UpperCAmelCase ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('''index out of range''' ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): assert -len(self.__components ) <= pos < len(self.__components ) lowerCAmelCase__ : Tuple = value def __magic_name__( self ): if len(self.__components ) == 0: raise Exception('''Vector is empty''' ) lowerCAmelCase__ : str = [c**2 for c in self.__components] return math.sqrt(sum(__UpperCAmelCase ) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = False ): lowerCAmelCase__ : Any = self * other lowerCAmelCase__ : Any = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def __lowerCAmelCase ( UpperCamelCase ) -> Vector: assert isinstance(UpperCamelCase , UpperCamelCase ) return Vector([0] * dimension ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Vector: assert isinstance(UpperCamelCase , UpperCamelCase ) and (isinstance(UpperCamelCase , UpperCamelCase )) lowerCAmelCase__ : str = [0] * dimension lowerCAmelCase__ : Optional[int] = 1 return Vector(UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Vector: assert ( isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) and (isinstance(UpperCamelCase , (int, float) )) ) return x * scalar + y def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Vector: random.seed(UpperCamelCase ) lowerCAmelCase__ : Tuple = [random.randint(UpperCamelCase , 
UpperCamelCase ) for _ in range(UpperCamelCase )] return Vector(UpperCamelCase ) class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Any = matrix lowerCAmelCase__ : Union[str, Any] = w lowerCAmelCase__ : List[Any] = h def __str__( self ): lowerCAmelCase__ : List[Any] = '''''' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , __UpperCAmelCase ): if self.__width == other.width() and self.__height == other.height(): lowerCAmelCase__ : str = [] for i in range(self.__height ): lowerCAmelCase__ : Union[str, Any] = [ self.__matrix[i][j] + other.component(__UpperCAmelCase , __UpperCAmelCase ) for j in range(self.__width ) ] matrix.append(__UpperCAmelCase ) return Matrix(__UpperCAmelCase , self.__width , self.__height ) else: raise Exception('''matrix must have the same dimension!''' ) def __sub__( self , __UpperCAmelCase ): if self.__width == other.width() and self.__height == other.height(): lowerCAmelCase__ : List[Any] = [] for i in range(self.__height ): lowerCAmelCase__ : str = [ self.__matrix[i][j] - other.component(__UpperCAmelCase , __UpperCAmelCase ) for j in range(self.__width ) ] matrix.append(__UpperCAmelCase ) return Matrix(__UpperCAmelCase , self.__width , self.__height ) else: raise Exception('''matrices must have the same dimension!''' ) @overload def __mul__( self , __UpperCAmelCase ): ... @overload def __mul__( self , __UpperCAmelCase ): ... def __mul__( self , __UpperCAmelCase ): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): # matrix-vector if len(__UpperCAmelCase ) == self.__width: lowerCAmelCase__ : Optional[Any] = zero_vector(self.__height ) for i in range(self.__height ): lowerCAmelCase__ : int = [ self.__matrix[i][j] * other.component(__UpperCAmelCase ) for j in range(self.__width ) ] ans.change_component(__UpperCAmelCase , sum(__UpperCAmelCase ) ) return ans else: raise Exception( '''vector must have the same size as the ''' '''number of columns of the matrix!''' ) elif isinstance(__UpperCAmelCase , (int, float) ): # matrix-scalar lowerCAmelCase__ : Union[str, Any] = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(__UpperCAmelCase , self.__width , self.__height ) return None def __magic_name__( self ): return self.__height def __magic_name__( self ): return self.__width def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('''change_component: indices out of bounds''' ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if 0 <= x < self.__height and 0 <= y < self.__width: lowerCAmelCase__ : Optional[int] = value else: raise Exception('''change_component: indices out of bounds''' ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): if self.__height != self.__width: raise Exception('''Matrix is not square''' ) lowerCAmelCase__ : int = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(__UpperCAmelCase ) ): lowerCAmelCase__ : Optional[Any] = minor[i][:y] + minor[i][y + 1 :] return Matrix(__UpperCAmelCase , self.__width - 1 , self.__height - 1 ).determinant() def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): if self.__height != self.__width: raise Exception('''Matrix is not square''' ) if 0 <= 
x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(__UpperCAmelCase , __UpperCAmelCase ) else: raise Exception('''Indices out of bounds''' ) def __magic_name__( self ): if self.__height != self.__width: raise Exception('''Matrix is not square''' ) if self.__height < 1: raise Exception('''Matrix has no element''' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: lowerCAmelCase__ : List[Any] = [ self.__matrix[0][y] * self.cofactor(0 , __UpperCAmelCase ) for y in range(self.__width ) ] return sum(__UpperCAmelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Matrix: lowerCAmelCase__ : list[list[float]] = [[0] * n for _ in range(UpperCamelCase )] return Matrix(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Matrix: random.seed(UpperCamelCase ) lowerCAmelCase__ : list[list[float]] = [ [random.randint(UpperCamelCase , UpperCamelCase ) for _ in range(UpperCamelCase )] for _ in range(UpperCamelCase ) ] return Matrix(UpperCamelCase , UpperCamelCase , UpperCamelCase )
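The code field above is TheAlgorithms-style linear-algebra code whose identifiers were mangled by the style transform (every method collapsed to __magic_name__, so the class as printed is not directly usable). A minimal usage sketch against the readable upstream API, assuming the original names height/width/component/minor/cofactor/determinant and the constructor Matrix(matrix, width, height):

# Hypothetical de-mangled usage; the method names are assumptions about the original source.
m = Matrix([[1.0, 2.0], [3.0, 4.0]], 2, 2)
print(m.determinant())   # 1*4 - 2*3 = -2.0
print(m.minor(0, 0))     # determinant of the submatrix [[4.0]] = 4.0
print(m.cofactor(0, 1))  # (-1)**(0+1) * minor(0, 1) = -3.0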
code_codestyle: 678
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""", } class _lowerCAmelCase ( _lowercase , _lowercase ): A__ = 'focalnet' def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1e-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Any = image_size lowerCAmelCase__ : Any = patch_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Dict = embed_dim lowerCAmelCase__ : Optional[int] = use_conv_embed lowerCAmelCase__ : Optional[int] = hidden_sizes lowerCAmelCase__ : Optional[Any] = depths lowerCAmelCase__ : Dict = focal_levels lowerCAmelCase__ : int = focal_windows lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : Optional[int] = mlp_ratio lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob lowerCAmelCase__ : List[Any] = drop_path_rate lowerCAmelCase__ : Tuple = use_layerscale lowerCAmelCase__ : List[Any] = layerscale_value lowerCAmelCase__ : Dict = use_post_layernorm lowerCAmelCase__ : Dict = use_post_layernorm_in_modulation lowerCAmelCase__ : Dict = normalize_modulator lowerCAmelCase__ : Union[str, Any] = initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : Tuple = encoder_stride lowerCAmelCase__ : Dict = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
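A minimal sketch of using this configuration once de-mangled, assuming it is transformers' FocalNetConfig (the 'focalnet' model_type string suggests so):

from transformers import FocalNetConfig

# Defaults mirror the __init__ above; depths=[2, 2, 6, 2] yields a stem plus four stages.
config = FocalNetConfig(image_size=224, embed_dim=96)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']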
style_context_codestyle: 678
label: 1
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def __magic_name__( self ): lowerCAmelCase__ : int = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__UpperCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : List[Any] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': 
ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @slow @require_torch def __magic_name__( self ): lowerCAmelCase__ : str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
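Outside the test harness, the pipeline being exercised is driven the same way; a sketch (the image path is a placeholder):

from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier("cats.png", candidate_labels=["cat", "plane", "remote"])
# preds is a list of {"score": float, "label": str} dicts sorted by descending score;
# the slow test above expects roughly 0.511/remote, 0.485/cat, 0.004/plane.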
code_codestyle: 678
from scipy.stats import pearsonr import datasets lowerCAmelCase_ = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ lowerCAmelCase_ = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but this time also returning the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ lowerCAmelCase_ = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H.
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): def __magic_name__( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): if return_pvalue: lowerCAmelCase__ : Union[str, Any] = pearsonr(__UpperCAmelCase , __UpperCAmelCase ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(__UpperCAmelCase , __UpperCAmelCase )[0] )}
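The doctest numbers can be sanity-checked directly against scipy, bypassing the metric wrapper (a sketch):

from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2), round(p, 2))  # -0.74 0.15, matching the docstring examples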
style_context_codestyle: 678
label: 1
import copy import os from collections import OrderedDict from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union if TYPE_CHECKING: from ...processing_utils import ProcessorMixin from ...utils import TensorType from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """google/owlvit-base-patch32""": """https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json""", """google/owlvit-base-patch16""": """https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json""", """google/owlvit-large-patch14""": """https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json""", } class _lowerCAmelCase ( _lowercase ): A__ = 'owlvit_text_model' def __init__( self , __UpperCAmelCase=4_9408 , __UpperCAmelCase=512 , __UpperCAmelCase=2048 , __UpperCAmelCase=12 , __UpperCAmelCase=8 , __UpperCAmelCase=16 , __UpperCAmelCase="quick_gelu" , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1.0 , __UpperCAmelCase=0 , __UpperCAmelCase=4_9406 , __UpperCAmelCase=4_9407 , **__UpperCAmelCase , ): super().__init__(pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : Tuple = hidden_size lowerCAmelCase__ : List[Any] = intermediate_size lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : Dict = num_attention_heads lowerCAmelCase__ : Any = max_position_embeddings lowerCAmelCase__ : List[str] = hidden_act lowerCAmelCase__ : str = layer_norm_eps lowerCAmelCase__ : str = attention_dropout lowerCAmelCase__ : Union[str, Any] = initializer_range lowerCAmelCase__ : Dict = initializer_factor @classmethod def __magic_name__( cls , __UpperCAmelCase , **__UpperCAmelCase ): cls._set_token_in_kwargs(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : int = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) # get the text config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": lowerCAmelCase__ : List[str] = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) class _lowerCAmelCase ( _lowercase ): A__ = 'owlvit_vision_model' def __init__( self , __UpperCAmelCase=768 , __UpperCAmelCase=3072 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3 , __UpperCAmelCase=768 , __UpperCAmelCase=32 , __UpperCAmelCase="quick_gelu" , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1.0 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Any = hidden_size lowerCAmelCase__ : int = intermediate_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : str = num_channels lowerCAmelCase__ : str = image_size lowerCAmelCase__ : str = patch_size lowerCAmelCase__ : int = hidden_act lowerCAmelCase__ : Tuple = layer_norm_eps lowerCAmelCase__ : List[str] = attention_dropout lowerCAmelCase__ : Tuple = initializer_range lowerCAmelCase__ : List[Any] = initializer_factor @classmethod def __magic_name__( cls , __UpperCAmelCase , **__UpperCAmelCase ): cls._set_token_in_kwargs(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Any = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) # get the vision config dict if we are loading from OwlViTConfig if config_dict.get('''model_type''' ) == "owlvit": lowerCAmelCase__ : Optional[int] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) class _lowerCAmelCase ( _lowercase ): A__ = 'owlvit' A__ = True def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=512 , __UpperCAmelCase=2.6592 , __UpperCAmelCase=True , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) if text_config is None: lowerCAmelCase__ : int = {} logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' ) if vision_config is None: lowerCAmelCase__ : Tuple = {} logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' ) lowerCAmelCase__ : str = OwlViTTextConfig(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = OwlViTVisionConfig(**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = projection_dim lowerCAmelCase__ : int = logit_scale_init_value lowerCAmelCase__ : Optional[Any] = return_dict lowerCAmelCase__ : Union[str, Any] = 1.0 @classmethod def __magic_name__( cls , __UpperCAmelCase , **__UpperCAmelCase ): cls._set_token_in_kwargs(__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = cls.get_config_dict(__UpperCAmelCase , **__UpperCAmelCase ) if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) @classmethod def __magic_name__( cls , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): lowerCAmelCase__ : str = {} lowerCAmelCase__ : Optional[Any] = text_config lowerCAmelCase__ : int = vision_config return cls.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ : Union[str, Any] = self.text_config.to_dict() lowerCAmelCase__ : Dict = self.vision_config.to_dict() lowerCAmelCase__ : str = self.__class__.model_type return output class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): return OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''sequence'''}), ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''attention_mask''', {0: '''batch''', 1: '''sequence'''}), ] ) @property def __magic_name__( self ): return OrderedDict( [ ('''logits_per_image''', {0: '''batch'''}), ('''logits_per_text''', {0: '''batch'''}), ('''text_embeds''', {0: '''batch'''}), ('''image_embeds''', {0: '''batch'''}), ] ) @property def __magic_name__( self ): return 1e-4 def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = None , ): lowerCAmelCase__ : List[str] = super().generate_dummy_inputs( processor.tokenizer , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , framework=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = super().generate_dummy_inputs( processor.image_processor , batch_size=__UpperCAmelCase , framework=__UpperCAmelCase ) return {**text_input_dict, **image_input_dict} @property def __magic_name__( self ): return 14
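A usage sketch for the three config classes above, assuming they are transformers' OwlViTTextConfig, OwlViTVisionConfig and OwlViTConfig, and that the composing classmethod's original name is from_text_vision_configs:

from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

text_config = OwlViTTextConfig()      # defaults above: hidden_size=512, 12 layers
vision_config = OwlViTVisionConfig()  # defaults above: hidden_size=768, patch_size=32
# The classmethod above stuffs plain dicts into config_dict before from_dict, so pass dicts:
config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
print(config.projection_dim)          # 512, the __init__ default above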
code_codestyle: 678
from manim import * class _lowerCAmelCase ( _lowercase ): def __magic_name__( self ): lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 ) lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )] lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 ) lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Optional[Any] = [] for i, rect in enumerate(__UpperCAmelCase ): rect.set_stroke(__UpperCAmelCase ) lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 ) self.add(__UpperCAmelCase ) model_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 ) lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 ) target.move_to(__UpperCAmelCase ) ckpt_arr.append(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase__ : List[Any] = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : str = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 ) lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) ) lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Dict = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) ) self.play(*__UpperCAmelCase ) self.play(FadeOut(__UpperCAmelCase ) ) lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) ) self.play( FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , ) self.wait()
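To render the scene above, one would give the class and file real names and invoke the manim CLI; a sketch with placeholder names:

# Assuming the scene is saved as checkpoint_anim.py and the class renamed to CheckpointToDisk:
#   manim -ql checkpoint_anim.py CheckpointToDisk
# -ql renders a low-quality preview; the scene animates checkpoint weights being offloaded to 'Disk'.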
style_context_codestyle: 678
label: 1
from manim import * class _lowerCAmelCase ( _lowercase ): def __magic_name__( self ): lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 ) lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )] lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 ) lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Optional[Any] = [] for i, rect in enumerate(__UpperCAmelCase ): rect.set_stroke(__UpperCAmelCase ) lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 ) self.add(__UpperCAmelCase ) model_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 ) lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 ) target.move_to(__UpperCAmelCase ) ckpt_arr.append(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase__ : List[Any] = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : str = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 ) lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) ) lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Dict = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) ) self.play(*__UpperCAmelCase ) self.play(FadeOut(__UpperCAmelCase ) ) lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) ) self.play( FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , ) self.wait()
code_codestyle: 678
import collections import os import re from pathlib import Path lowerCAmelCase_ = """src/transformers""" # Matches is_xxx_available() lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: lowerCAmelCase_ = re.compile(R"""^\s*try:""") # Catches a line with else: lowerCAmelCase_ = re.compile(R"""^\s*else:""") def __lowerCAmelCase ( UpperCamelCase ) -> int: if _re_test_backend.search(UpperCamelCase ) is None: return None lowerCAmelCase__ : int = [b[0] for b in _re_backend.findall(UpperCamelCase )] backends.sort() return "_and_".join(UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Any: with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase__ : Union[str, Any] = f.readlines() lowerCAmelCase__ : Tuple = 0 while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase__ : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase__ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCamelCase ): lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0] lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase ) if single_line_import_search is not None: lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase__ : Any = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase__ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ : Optional[int] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase__ : str = lines[line_index] if _re_import_struct_add_one.search(UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCamelCase ) is not None: lowerCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' ) lowerCAmelCase__ : List[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_between_brackets.search(UpperCamelCase ) is not None: lowerCAmelCase__ : List[Any] = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' ) lowerCAmelCase__ : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_quote_object.search(UpperCamelCase ) is not None: objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase__ : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase__ : Any = [] while ( line_index < len(UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase__ : Tuple = lines[line_index] lowerCAmelCase__ : List[Any] = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase__ : Dict = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase__ : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase__ : Any = lines[line_index] lowerCAmelCase__ : Union[str, Any] = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase__ : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]: def find_duplicates(UpperCamelCase ): return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase__ : Optional[Any] = [] for key in import_dict_objects.keys(): lowerCAmelCase__ : str = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) lowerCAmelCase__ : int = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase__ : List[Any] = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __lowerCAmelCase ( ) -> Optional[Any]: lowerCAmelCase__ : Dict = [] for root, _, files in os.walk(UpperCamelCase ): if "__init__.py" in files: lowerCAmelCase__ : Any = os.path.join(UpperCamelCase , '''__init__.py''' ) lowerCAmelCase__ : List[Any] = parse_init(UpperCamelCase ) if objects is not None: lowerCAmelCase__ : Optional[int] = analyze_results(*UpperCamelCase ) if len(UpperCamelCase ) > 0: lowerCAmelCase__ : Tuple = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(UpperCamelCase ) ) if len(UpperCamelCase ) > 0: raise ValueError('''\n\n'''.join(UpperCamelCase ) ) def __lowerCAmelCase ( ) -> Tuple: lowerCAmelCase__ : str = [] for path, directories, files in os.walk(UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase__ : Tuple = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) ) lowerCAmelCase__ : Dict = short_path.replace(os.path.sep , '''.''' ) submodules.append(UpperCamelCase ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase__ : 
Union[str, Any] = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) ) lowerCAmelCase__ : Tuple = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(UpperCamelCase ) return submodules lowerCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def __lowerCAmelCase ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import lowerCAmelCase__ : Dict = direct_transformers_import(UpperCamelCase ) lowerCAmelCase__ : int = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and # (potentially re-) add them. with open(os.path.join(UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f: lowerCAmelCase__ : str = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , UpperCamelCase ) ) ) lowerCAmelCase__ : Optional[int] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(UpperCamelCase ) > 0: lowerCAmelCase__ : List[Any] = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
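The most example-worthy helper above is find_backend, which normalizes backend-guard lines via the module-level regexes; a sketch of its expected behavior:

# find_backend (readable name for the first helper above) on typical init lines:
#   find_backend("    if not is_torch_available():")  ->  "torch"
# Multiple is_xxx_available() hits on one matching line are sorted and joined with "_and_";
# lines that do not match the backend-guard regex return None.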
style_context_codestyle: 678
label: 1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class _lowerCAmelCase ( _lowercase ): A__ = 'table-transformer' A__ = ['past_key_values'] A__ = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', } def __init__( self , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=3 , __UpperCAmelCase=100 , __UpperCAmelCase=6 , __UpperCAmelCase=2048 , __UpperCAmelCase=8 , __UpperCAmelCase=6 , __UpperCAmelCase=2048 , __UpperCAmelCase=8 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=256 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1.0 , __UpperCAmelCase=False , __UpperCAmelCase="sine" , __UpperCAmelCase="resnet50" , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=1 , __UpperCAmelCase=1 , __UpperCAmelCase=5 , __UpperCAmelCase=2 , __UpperCAmelCase=0.1 , **__UpperCAmelCase , ): if backbone_config is not None and use_timm_backbone: raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' ) if not use_timm_backbone: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' ) lowerCAmelCase__ : Union[str, Any] = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] ) elif isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Tuple = backbone_config.get('''model_type''' ) lowerCAmelCase__ : Any = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase__ : List[Any] = config_class.from_dict(__UpperCAmelCase ) # set timm attributes to None lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = None, None, None lowerCAmelCase__ : str = use_timm_backbone lowerCAmelCase__ : List[Any] = backbone_config lowerCAmelCase__ : Optional[Any] = num_channels lowerCAmelCase__ : Optional[Any] = num_queries lowerCAmelCase__ : int = d_model lowerCAmelCase__ : Union[str, Any] = encoder_ffn_dim lowerCAmelCase__ : Dict = encoder_layers lowerCAmelCase__ : Union[str, Any] = encoder_attention_heads lowerCAmelCase__ : Any = decoder_ffn_dim lowerCAmelCase__ : Dict = decoder_layers lowerCAmelCase__ : Dict = decoder_attention_heads lowerCAmelCase__ : Optional[int] = dropout lowerCAmelCase__ : List[Any] = attention_dropout lowerCAmelCase__ : Tuple = activation_dropout lowerCAmelCase__ : Tuple = activation_function lowerCAmelCase__ : int = init_std lowerCAmelCase__ : Any = init_xavier_std lowerCAmelCase__ : Tuple = encoder_layerdrop lowerCAmelCase__ : List[str] = decoder_layerdrop lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : int = auxiliary_loss lowerCAmelCase__ : str = position_embedding_type lowerCAmelCase__ : Optional[int] = backbone lowerCAmelCase__ : Union[str, Any] = use_pretrained_backbone lowerCAmelCase__ : Dict = dilation # Hungarian matcher lowerCAmelCase__ : int = class_cost lowerCAmelCase__ : Union[str, Any] = bbox_cost lowerCAmelCase__ : List[str] = giou_cost # Loss coefficients lowerCAmelCase__ : List[Any] = 
mask_loss_coefficient lowerCAmelCase__ : Optional[int] = dice_loss_coefficient lowerCAmelCase__ : str = bbox_loss_coefficient lowerCAmelCase__ : Optional[Any] = giou_loss_coefficient lowerCAmelCase__ : str = eos_coefficient super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase ) @property def __magic_name__( self ): return self.encoder_attention_heads @property def __magic_name__( self ): return self.d_model class _lowerCAmelCase ( _lowercase ): A__ = version.parse('1.11' ) @property def __magic_name__( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ('''pixel_mask''', {0: '''batch'''}), ] ) @property def __magic_name__( self ): return 1e-5 @property def __magic_name__( self ): return 12
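A sketch of the attribute_map indirection declared above, assuming the class is transformers' TableTransformerConfig:

from transformers import TableTransformerConfig

config = TableTransformerConfig()
# attribute_map routes the common names onto the DETR-style fields:
print(config.hidden_size)          # -> d_model, 256 by default
print(config.num_attention_heads)  # -> encoder_attention_heads, 8 by default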
code_codestyle: 678
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Tuple = batch_size lowerCAmelCase__ : Union[str, Any] = seq_length lowerCAmelCase__ : str = is_training lowerCAmelCase__ : Union[str, Any] = use_input_mask lowerCAmelCase__ : List[Any] = use_token_type_ids lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : List[str] = embedding_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Optional[int] = num_attention_heads lowerCAmelCase__ : List[str] = intermediate_size lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = max_position_embeddings lowerCAmelCase__ : List[Any] = type_vocab_size lowerCAmelCase__ : Optional[Any] = type_sequence_label_size lowerCAmelCase__ : List[Any] = initializer_range lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[str] = num_choices lowerCAmelCase__ : Any = scope def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : str = None if self.use_input_mask: lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Optional[int] = None if self.use_labels: lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__( self ): return MegatronBertConfig( 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) 
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_choices lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Any = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Optional[int] = config_and_inputs lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ): A__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if 
is_torch_available() else () ) A__ = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) A__ = True # test_resize_embeddings = False A__ = False def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def __magic_name__( self ): lowerCAmelCase__ : str = MegatronBertModelTester(self ) lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __magic_name__( self ): self.config_tester.run_common_tests() def __magic_name__( self ): lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]: return torch.tensor( UpperCamelCase , dtype=torch.long , device=UpperCamelCase , ) lowerCAmelCase_ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__( self ): lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.half() lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 
7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0] lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj] lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj] lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
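# A minimal smoke-test sketch for the MegatronBERT suite above. It assumes only
# the public `transformers` API (MegatronBertConfig / MegatronBertModel); the
# tiny config values below are illustrative, not taken from any released checkpoint.
import torch
from transformers import MegatronBertConfig, MegatronBertModel

tiny_config = MegatronBertConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
tiny_model = MegatronBertModel(tiny_config).eval()
with torch.no_grad():
    out = tiny_model(torch.randint(0, 99, (1, 8)))  # (batch, seq_len) of token ids
assert out.last_hidden_state.shape == (1, 8, 32)  # (batch, seq_len, hidden_size)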
def solution(n: int = 1000) -> int:
    # Returns the index of the first Fibonacci term whose decimal expansion has `n` digits.
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
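# Worked check for the solver above (illustrative, not part of the original file):
# the first Fibonacci term with 3 digits is F(12) = 144, and the full Project
# Euler 25 answer, solution(1000), evaluates to 4782.
assert solution(3) == 12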
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _lowerCAmelCase ( _lowercase ): A__ = 'bart' A__ = ['past_key_values'] A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : int = d_model lowerCAmelCase__ : str = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Dict = encoder_attention_heads lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim lowerCAmelCase__ : Union[str, Any] = decoder_layers lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Tuple = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] = activation_function lowerCAmelCase__ : Union[str, Any] = init_std lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Optional[int] = classifier_dropout lowerCAmelCase__ : str = use_cache lowerCAmelCase__ : int = encoder_layers lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): lowerCAmelCase__ : str = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
""" '''The config can simply be saved and uploaded again to be fixed.''' ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ : Any = {0: '''batch'''} lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Dict = super().outputs else: lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs lowerCAmelCase__ : int = seq_length if not self.use_past else 1 lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1] lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads lowerCAmelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : 
List[Any] = decoder_seq_length + 3 lowerCAmelCase__ : Any = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : List[str] = seqlen + 2 lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads lowerCAmelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype lowerCAmelCase__ : List[Any] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[Any] = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase__ : Tuple = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) lowerCAmelCase__ : int = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
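# Usage sketch for the ONNX export config above, assuming it mirrors transformers'
# BartOnnxConfig (the import path and checkpoint name below are assumptions and
# may vary by transformers version):
from transformers import AutoTokenizer, BartConfig
from transformers.models.bart.configuration_bart import BartOnnxConfig
from transformers.utils import TensorType

bart_config = BartConfig()
onnx_config = BartOnnxConfig(bart_config, task="default")
bart_tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large")
# Builds fixed-size encoder/decoder dummy tensors as described above
dummy_inputs = onnx_config.generate_dummy_inputs(bart_tokenizer, framework=TensorType.PYTORCH)
print(sorted(dummy_inputs))  # input_ids, attention_mask, decoder_* tensors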
import importlib import json import os from collections import OrderedDict from typing import Dict, Optional, Union # Build the list of all image processors from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...image_processing_utils import ImageProcessingMixin from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging from .auto_factory import _LazyAutoMapping from .configuration_auto import ( CONFIG_MAPPING_NAMES, AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings, ) lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = OrderedDict( [ ("""align""", """EfficientNetImageProcessor"""), ("""beit""", """BeitImageProcessor"""), ("""bit""", """BitImageProcessor"""), ("""blip""", """BlipImageProcessor"""), ("""blip-2""", """BlipImageProcessor"""), ("""bridgetower""", """BridgeTowerImageProcessor"""), ("""chinese_clip""", """ChineseCLIPImageProcessor"""), ("""clip""", """CLIPImageProcessor"""), ("""clipseg""", """ViTImageProcessor"""), ("""conditional_detr""", """ConditionalDetrImageProcessor"""), ("""convnext""", """ConvNextImageProcessor"""), ("""convnextv2""", """ConvNextImageProcessor"""), ("""cvt""", """ConvNextImageProcessor"""), ("""data2vec-vision""", """BeitImageProcessor"""), ("""deformable_detr""", """DeformableDetrImageProcessor"""), ("""deit""", """DeiTImageProcessor"""), ("""deta""", """DetaImageProcessor"""), ("""detr""", """DetrImageProcessor"""), ("""dinat""", """ViTImageProcessor"""), ("""donut-swin""", """DonutImageProcessor"""), ("""dpt""", """DPTImageProcessor"""), ("""efficientformer""", """EfficientFormerImageProcessor"""), ("""efficientnet""", """EfficientNetImageProcessor"""), ("""flava""", """FlavaImageProcessor"""), ("""focalnet""", """BitImageProcessor"""), ("""git""", """CLIPImageProcessor"""), ("""glpn""", """GLPNImageProcessor"""), ("""groupvit""", """CLIPImageProcessor"""), ("""imagegpt""", """ImageGPTImageProcessor"""), ("""instructblip""", """BlipImageProcessor"""), ("""layoutlmv2""", """LayoutLMv2ImageProcessor"""), ("""layoutlmv3""", """LayoutLMv3ImageProcessor"""), ("""levit""", """LevitImageProcessor"""), ("""mask2former""", """Mask2FormerImageProcessor"""), ("""maskformer""", """MaskFormerImageProcessor"""), ("""mgp-str""", """ViTImageProcessor"""), ("""mobilenet_v1""", """MobileNetV1ImageProcessor"""), ("""mobilenet_v2""", """MobileNetV2ImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevit""", """MobileViTImageProcessor"""), ("""mobilevitv2""", """MobileViTImageProcessor"""), ("""nat""", """ViTImageProcessor"""), ("""oneformer""", """OneFormerImageProcessor"""), ("""owlvit""", """OwlViTImageProcessor"""), ("""perceiver""", """PerceiverImageProcessor"""), ("""pix2struct""", """Pix2StructImageProcessor"""), ("""poolformer""", """PoolFormerImageProcessor"""), ("""regnet""", """ConvNextImageProcessor"""), ("""resnet""", """ConvNextImageProcessor"""), ("""sam""", """SamImageProcessor"""), ("""segformer""", """SegformerImageProcessor"""), ("""swiftformer""", """ViTImageProcessor"""), ("""swin""", """ViTImageProcessor"""), ("""swin2sr""", """Swin2SRImageProcessor"""), ("""swinv2""", """ViTImageProcessor"""), ("""table-transformer""", """DetrImageProcessor"""), ("""timesformer""", """VideoMAEImageProcessor"""), ("""tvlt""", """TvltImageProcessor"""), ("""upernet""", """SegformerImageProcessor"""), ("""van""", """ConvNextImageProcessor"""), ("""videomae""", 
"""VideoMAEImageProcessor"""), ("""vilt""", """ViltImageProcessor"""), ("""vit""", """ViTImageProcessor"""), ("""vit_hybrid""", """ViTHybridImageProcessor"""), ("""vit_mae""", """ViTImageProcessor"""), ("""vit_msn""", """ViTImageProcessor"""), ("""xclip""", """CLIPImageProcessor"""), ("""yolos""", """YolosImageProcessor"""), ] ) lowerCAmelCase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES) def __lowerCAmelCase ( UpperCamelCase ) -> str: for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items(): if class_name in extractors: lowerCAmelCase__ : Tuple = model_type_to_module_name(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = importlib.import_module(F""".{module_name}""" , '''transformers.models''' ) try: return getattr(UpperCamelCase , UpperCamelCase ) except AttributeError: continue for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items(): if getattr(UpperCamelCase , '''__name__''' , UpperCamelCase ) == class_name: return extractor # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main # init and we return the proper dummy to get an appropriate error message. lowerCAmelCase__ : List[Any] = importlib.import_module('''transformers''' ) if hasattr(UpperCamelCase , UpperCamelCase ): return getattr(UpperCamelCase , UpperCamelCase ) return None def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase = None , UpperCamelCase = False , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = False , **UpperCamelCase , ) -> Optional[Any]: lowerCAmelCase__ : List[str] = get_file_from_repo( UpperCamelCase , UpperCamelCase , cache_dir=UpperCamelCase , force_download=UpperCamelCase , resume_download=UpperCamelCase , proxies=UpperCamelCase , use_auth_token=UpperCamelCase , revision=UpperCamelCase , local_files_only=UpperCamelCase , ) if resolved_config_file is None: logger.info( '''Could not locate the image processor configuration file, will try to use the model config instead.''' ) return {} with open(UpperCamelCase , encoding='''utf-8''' ) as reader: return json.load(UpperCamelCase ) class _lowerCAmelCase : def __init__( self ): raise EnvironmentError( '''AutoImageProcessor is designed to be instantiated ''' '''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' ) @classmethod @replace_list_option_in_docstrings(__UpperCAmelCase ) def __magic_name__( cls , __UpperCAmelCase , **__UpperCAmelCase ): lowerCAmelCase__ : Tuple = kwargs.pop('''config''' , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = kwargs.pop('''trust_remote_code''' , __UpperCAmelCase ) lowerCAmelCase__ : Any = True lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = ImageProcessingMixin.get_image_processor_dict(__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = config_dict.get('''image_processor_type''' , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = None if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ): lowerCAmelCase__ : Tuple = config_dict['''auto_map''']['''AutoImageProcessor'''] # If we still don't have the image processor class, check if we're loading from a previous feature extractor config # and if so, infer the image processor class from there. 
if image_processor_class is None and image_processor_auto_map is None: lowerCAmelCase__ : Dict = config_dict.pop('''feature_extractor_type''' , __UpperCAmelCase ) if feature_extractor_class is not None: logger.warning( '''Could not find image processor class in the image processor config or the model config. Loading''' ''' based on pattern matching with the model\'s feature extractor configuration.''' ) lowerCAmelCase__ : List[Any] = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' ) if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ): lowerCAmelCase__ : str = config_dict['''auto_map''']['''AutoFeatureExtractor'''] lowerCAmelCase__ : Any = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' ) logger.warning( '''Could not find image processor auto map in the image processor config or the model config.''' ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' ) # If we don't find the image processor class in the image processor config, let's try the model config. if image_processor_class is None and image_processor_auto_map is None: if not isinstance(__UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = AutoConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase ) # It could be in `config.image_processor_type`` lowerCAmelCase__ : Dict = getattr(__UpperCAmelCase , '''image_processor_type''' , __UpperCAmelCase ) if hasattr(__UpperCAmelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map: lowerCAmelCase__ : str = config.auto_map['''AutoImageProcessor'''] if image_processor_class is not None: lowerCAmelCase__ : List[Any] = image_processor_class_from_name(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = image_processor_auto_map is not None lowerCAmelCase__ : Union[str, Any] = image_processor_class is not None or type(__UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING lowerCAmelCase__ : List[Any] = resolve_trust_remote_code( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if has_remote_code and trust_remote_code: lowerCAmelCase__ : List[Any] = get_class_from_dynamic_module( __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ : Tuple = kwargs.pop('''code_revision''' , __UpperCAmelCase ) if os.path.isdir(__UpperCAmelCase ): image_processor_class.register_for_auto_class() return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) elif image_processor_class is not None: return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) # Last try: we use the IMAGE_PROCESSOR_MAPPING. elif type(__UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING: lowerCAmelCase__ : str = IMAGE_PROCESSOR_MAPPING[type(__UpperCAmelCase )] return image_processor_class.from_dict(__UpperCAmelCase , **__UpperCAmelCase ) raise ValueError( f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """ f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """ f"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" ) @staticmethod def __magic_name__( __UpperCAmelCase , __UpperCAmelCase ): IMAGE_PROCESSOR_MAPPING.register(__UpperCAmelCase , __UpperCAmelCase )
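# A short usage sketch for the auto class above, assuming it corresponds to
# transformers' public AutoImageProcessor (the checkpoint name is illustrative):
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(image_processor).__name__)  # resolves to ViTImageProcessor via the "vit" mapping entry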
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class _lowerCAmelCase ( _lowercase ): A__ = 'sew-d' def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : Optional[int] = feat_extract_norm lowerCAmelCase__ : str = feat_extract_activation lowerCAmelCase__ : int = list(__UpperCAmelCase ) lowerCAmelCase__ : int = list(__UpperCAmelCase ) lowerCAmelCase__ : Any = list(__UpperCAmelCase ) lowerCAmelCase__ : int = conv_bias lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups lowerCAmelCase__ : int = len(self.conv_dim ) lowerCAmelCase__ : Union[str, Any] = num_hidden_layers lowerCAmelCase__ : Any = intermediate_size lowerCAmelCase__ : int = squeeze_factor lowerCAmelCase__ : int = max_position_embeddings lowerCAmelCase__ : Any = position_buckets lowerCAmelCase__ : Optional[int] = share_att_key lowerCAmelCase__ : Tuple = relative_attention lowerCAmelCase__ : Optional[int] = norm_rel_ebd lowerCAmelCase__ : Tuple = list(__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = hidden_act lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : Optional[int] = hidden_dropout lowerCAmelCase__ : Union[str, Any] = attention_dropout lowerCAmelCase__ : str = activation_dropout lowerCAmelCase__ : List[Any] = feat_proj_dropout lowerCAmelCase__ : Any = final_dropout lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : List[str] = feature_layer_norm_eps lowerCAmelCase__ : Tuple = initializer_range lowerCAmelCase__ : Tuple = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f"""but 
is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ : Tuple = apply_spec_augment lowerCAmelCase__ : List[str] = mask_time_prob lowerCAmelCase__ : int = mask_time_length lowerCAmelCase__ : int = mask_time_min_masks lowerCAmelCase__ : Optional[int] = mask_feature_prob lowerCAmelCase__ : int = mask_feature_length lowerCAmelCase__ : int = mask_feature_min_masks # ctc loss lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction lowerCAmelCase__ : Any = ctc_zero_infinity # sequence classification lowerCAmelCase__ : Tuple = use_weighted_layer_sum lowerCAmelCase__ : Dict = classifier_proj_size @property def __magic_name__( self ): return functools.reduce(operator.mul , self.conv_stride , 1 )
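# Construction sketch for the configuration above, assuming it corresponds to
# transformers' SEWDConfig; the overrides shown are arbitrary examples, and the
# property name `inputs_to_logits_ratio` is the name this final property carries
# in the upstream library:
from transformers import SEWDConfig

sewd_config = SEWDConfig(hidden_size=256, num_hidden_layers=4)
print(sewd_config.inputs_to_logits_ratio)  # product of conv strides; 320 for the defaults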
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
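# Worked example for the helpers above (the values are arbitrary): 2 mol at
# 300 K in 0.01 m^3 gives P = nRT / V = 2 * 300 * 8.314462 / 0.01 ≈ 498867.7 Pa.
assert round(pressure_of_gas_system(2, 300, 0.01), 1) == 498867.7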
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 
1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example'''] lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing 
lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
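# Quick tokenizer sketch mirroring the assertions above (downloading the
# checkpoint is assumed; this snippet is illustrative, not part of the test suite):
from transformers import PegasusTokenizer

pegasus_tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
ids = pegasus_tok("To ensure a smooth flow of bank resolutions.").input_ids
print(ids[-1] == pegasus_tok.eos_token_id)  # True: an EOS (</s>, id 1) is appended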
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _lowerCAmelCase ( _lowercase ): def __magic_name__( self ): lowerCAmelCase__ : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCAmelCase , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(__UpperCAmelCase , '''depth_multiplier''' ) ) class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=0.25 , __UpperCAmelCase=8 , __UpperCAmelCase=8 , __UpperCAmelCase=6 , __UpperCAmelCase=32 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu6" , __UpperCAmelCase=1280 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=None , ): lowerCAmelCase__ : List[Any] = parent lowerCAmelCase__ : Any = batch_size lowerCAmelCase__ : Optional[Any] = num_channels lowerCAmelCase__ : List[str] = image_size lowerCAmelCase__ : Tuple = depth_multiplier lowerCAmelCase__ : int = depth_divisible_by lowerCAmelCase__ : Any = min_depth lowerCAmelCase__ : List[Any] = expand_ratio lowerCAmelCase__ : List[Any] = tf_padding lowerCAmelCase__ : int = output_stride lowerCAmelCase__ : Union[str, Any] = first_layer_is_expansion lowerCAmelCase__ : Union[str, Any] = finegrained_output lowerCAmelCase__ : str = hidden_act lowerCAmelCase__ : Union[str, Any] = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier ) lowerCAmelCase__ : Optional[Any] = classifier_dropout_prob lowerCAmelCase__ : Dict = use_labels lowerCAmelCase__ : Tuple = is_training lowerCAmelCase__ : Tuple = num_labels lowerCAmelCase__ : List[Any] = initializer_range lowerCAmelCase__ : Optional[Any] = scope def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : List[Any] = None lowerCAmelCase__ : Optional[Any] = None if self.use_labels: lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) lowerCAmelCase__ : str = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__( self ): return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , 
classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Any = MobileNetVaModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) self.parent.assertEqual( result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : int = self.num_labels lowerCAmelCase__ : Optional[int] = MobileNetVaForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Any = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Tuple = self.num_labels lowerCAmelCase__ : List[Any] = MobileNetVaForSemanticSegmentation(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) lowerCAmelCase__ : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual( result.logits.shape , ( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = config_and_inputs lowerCAmelCase__ : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ): A__ = ( (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation) if is_torch_available() else () ) A__ = ( { 'feature-extraction': MobileNetVaModel, 'image-classification': MobileNetVaForImageClassification, 'image-segmentation': MobileNetVaForSemanticSegmentation, } if is_torch_available() else {} ) A__ = False A__ = False A__ = False A__ = False def __magic_name__( self ): lowerCAmelCase__ : Any = MobileNetVaModelTester(self ) lowerCAmelCase__ : Tuple = MobileNetVaConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def __magic_name__( self ): self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV2 does not use inputs_embeds''' ) def __magic_name__( self ): pass @unittest.skip(reason='''MobileNetV2 does not support input and output embeddings''' ) def __magic_name__( self ): pass @unittest.skip(reason='''MobileNetV2 does not output attentions''' ) def __magic_name__( self ): pass def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Any = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names 
order is deterministic lowerCAmelCase__ : List[str] = [*signature.parameters.keys()] lowerCAmelCase__ : Tuple = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __magic_name__( self ): def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : int = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : str = outputs.hidden_states lowerCAmelCase__ : Any = 16 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : List[str] = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : Optional[Any] = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCAmelCase ) @slow def __magic_name__( self ): for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Dict = MobileNetVaModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __lowerCAmelCase ( ) -> Dict: lowerCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__( self ): return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v2_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__( self ): lowerCAmelCase__ : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v2_1.0_224''' ).to(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = self.default_image_processor lowerCAmelCase__ : Union[str, Any] = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : List[str] = model(**__UpperCAmelCase ) # verify the logits lowerCAmelCase__ : Union[str, Any] = torch.Size((1, 1001) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) lowerCAmelCase__ : Any = torch.tensor([0.2445, -1.1993, 0.1905] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) @slow def __magic_name__( self ): lowerCAmelCase__ : str = MobileNetVaForSemanticSegmentation.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) lowerCAmelCase__ : Union[str, Any] = model.to(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = MobileNetVaImageProcessor.from_pretrained('''google/deeplabv3_mobilenet_v2_1.0_513''' ) lowerCAmelCase__ : Optional[Any] = prepare_img() lowerCAmelCase__ : List[str] = 
image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase ) lowerCAmelCase__ : int = outputs.logits # verify the logits lowerCAmelCase__ : Dict = torch.Size((1, 21, 65, 65) ) self.assertEqual(logits.shape , __UpperCAmelCase ) lowerCAmelCase__ : Any = torch.tensor( [ [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]], [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]], [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]], ] , device=__UpperCAmelCase , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , __UpperCAmelCase , atol=1e-4 ) )
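# End-to-end sketch of the classification path exercised above, using the
# public pipeline API (the checkpoint is the one used in the slow test; the
# image path is an assumption):
from transformers import pipeline

classifier = pipeline("image-classification", model="google/mobilenet_v2_1.0_224")
preds = classifier("./tests/fixtures/tests_samples/COCO/000000039769.png")
print(preds[0]["label"], round(preds[0]["score"], 3))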
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
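# Construction sketch for the config above, assuming it matches transformers'
# public DonutSwinConfig; note that hidden_size is derived, not passed:
from transformers import DonutSwinConfig

donut_config = DonutSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
print(donut_config.hidden_size)  # 96 * 2 ** 3 == 768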
import inspect
import os
import unittest
from dataclasses import dataclass

import torch

from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler


@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Pass custom arguments to the GradScaler through a kwargs handler.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_map != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
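# Single-process sketch of the handler pattern exercised above: only fields
# that differ from the dataclass defaults survive `to_kwargs()` (no GPU needed;
# this snippet is illustrative, not part of the original test file).
handler = MockClass(a=2, c=2.25)
print(handler.to_kwargs())  # {'a': 2, 'c': 2.25}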
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
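# Worked examples (illustrative): a kilometer is 10**3 meters, so
#   length_conversion(1, "meter", "kilometer")  # -> 0.001 (exponent 0 - 3 = -3)
#   length_conversion(1, "kilometer", "meter")  # -> 1000.0 (exponent 3 - 0 = 3)
# Unknown units raise ValueError listing the valid abbreviations.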
import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
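# Example invocation (paths are placeholders; the script name below assumes the upstream
# transformers layout and is illustrative only):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/tf_ckpt \
#       --big_bird_config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/output \
#       --is_trivia_qa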
import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _lowerCAmelCase : @staticmethod def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ): pass @is_pipeline_test @require_vision class _lowerCAmelCase ( unittest.TestCase ): @require_torch def __magic_name__( self ): lowerCAmelCase__ : int = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , ) lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(__UpperCAmelCase ) , [ [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}], [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}], ] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : List[Any] = pipeline( model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' ) lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , ) lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': 
ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], [ {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, ], ] , ) @slow @require_torch def __magic_name__( self ): lowerCAmelCase__ : str = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , ) @slow @require_tf def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = pipeline( task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' ) # This is an image of 2 cats with remotes and no planes lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ] , ) lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 ) self.assertEqual( nested_simplify(__UpperCAmelCase ) , [ [ {'''score''': 0.511, '''label''': '''remote'''}, {'''score''': 0.485, '''label''': '''cat'''}, {'''score''': 0.004, '''label''': '''plane'''}, ], ] * 5 , )
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config


# pytest resolves fixtures by name, so each fixture below must have a unique name and
# the parameters of dependent fixtures must match the fixture they consume.
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename


# FILE_CONTENT + files


FILE_CONTENT = """\
Text data.
Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename


@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    import tarfile

    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    import zipfile

    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename


DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]


@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path


@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path


@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path


@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
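# Note: the BFS-based augmenting-path search makes this the Edmonds-Karp variant of
# Ford-Fulkerson, O(V * E**2) overall. Once max flow is reached, edges that are
# saturated in the residual graph (capacity driven to zero) but positive in the saved
# original capacities `temp` form the minimum cut, which is what `mincut` collects.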
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_url from PIL import Image from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase ) -> List[str]: lowerCAmelCase__ : Dict = DPTConfig() if "large" in checkpoint_url: lowerCAmelCase__ : int = 1024 lowerCAmelCase__ : List[str] = 4096 lowerCAmelCase__ : List[str] = 24 lowerCAmelCase__ : Union[str, Any] = 16 lowerCAmelCase__ : str = [5, 11, 17, 23] lowerCAmelCase__ : Dict = [256, 512, 1024, 1024] lowerCAmelCase__ : Optional[Any] = (1, 384, 384) if "ade" in checkpoint_url: lowerCAmelCase__ : Any = True lowerCAmelCase__ : Optional[Any] = 150 lowerCAmelCase__ : List[str] = '''huggingface/label-files''' lowerCAmelCase__ : str = '''ade20k-id2label.json''' lowerCAmelCase__ : Optional[int] = json.load(open(cached_download(hf_hub_url(UpperCamelCase , UpperCamelCase , repo_type='''dataset''' ) ) , '''r''' ) ) lowerCAmelCase__ : Tuple = {int(UpperCamelCase ): v for k, v in idalabel.items()} lowerCAmelCase__ : Dict = idalabel lowerCAmelCase__ : Optional[int] = {v: k for k, v in idalabel.items()} lowerCAmelCase__ : int = [1, 150, 480, 480] return config, expected_shape def __lowerCAmelCase ( UpperCamelCase ) -> Dict: lowerCAmelCase__ : Tuple = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias'''] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Tuple: if ( "pretrained.model" in name and "cls_token" not in name and "pos_embed" not in name and "patch_embed" not in name ): lowerCAmelCase__ : Any = name.replace('''pretrained.model''' , '''dpt.encoder''' ) if "pretrained.model" in name: lowerCAmelCase__ : int = name.replace('''pretrained.model''' , '''dpt.embeddings''' ) if "patch_embed" in name: lowerCAmelCase__ : Tuple = name.replace('''patch_embed''' , '''patch_embeddings''' ) if "pos_embed" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''pos_embed''' , '''position_embeddings''' ) if "attn.proj" in name: lowerCAmelCase__ : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "proj" in name and "project" not in name: lowerCAmelCase__ : Optional[Any] = name.replace('''proj''' , '''projection''' ) if "blocks" in name: lowerCAmelCase__ : str = name.replace('''blocks''' , '''layer''' ) if "mlp.fc1" in name: lowerCAmelCase__ : Optional[int] = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: lowerCAmelCase__ : List[Any] = name.replace('''mlp.fc2''' , '''output.dense''' ) if "norm1" in name: lowerCAmelCase__ : Tuple = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: lowerCAmelCase__ : str = name.replace('''norm2''' , '''layernorm_after''' ) if "scratch.output_conv" in name: lowerCAmelCase__ : Optional[int] = name.replace('''scratch.output_conv''' , '''head''' ) if "scratch" in name: lowerCAmelCase__ : Tuple = name.replace('''scratch''' , '''neck''' ) if "layer1_rn" in name: lowerCAmelCase__ : List[Any] = name.replace('''layer1_rn''' , '''convs.0''' ) if "layer2_rn" in name: lowerCAmelCase__ : str = name.replace('''layer2_rn''' , '''convs.1''' ) if "layer3_rn" in name: lowerCAmelCase__ : Optional[Any] = name.replace('''layer3_rn''' , '''convs.2''' ) if "layer4_rn" in name: lowerCAmelCase__ : Union[str, Any] = 
name.replace('''layer4_rn''' , '''convs.3''' ) if "refinenet" in name: lowerCAmelCase__ : Optional[int] = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] ) # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3 lowerCAmelCase__ : int = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" ) if "out_conv" in name: lowerCAmelCase__ : Optional[Any] = name.replace('''out_conv''' , '''projection''' ) if "resConfUnit1" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''resConfUnit1''' , '''residual_layer1''' ) if "resConfUnit2" in name: lowerCAmelCase__ : Optional[Any] = name.replace('''resConfUnit2''' , '''residual_layer2''' ) if "conv1" in name: lowerCAmelCase__ : Any = name.replace('''conv1''' , '''convolution1''' ) if "conv2" in name: lowerCAmelCase__ : Optional[Any] = name.replace('''conv2''' , '''convolution2''' ) # readout blocks if "pretrained.act_postprocess1.0.project.0" in name: lowerCAmelCase__ : List[Any] = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' ) if "pretrained.act_postprocess2.0.project.0" in name: lowerCAmelCase__ : Optional[int] = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' ) if "pretrained.act_postprocess3.0.project.0" in name: lowerCAmelCase__ : Tuple = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' ) if "pretrained.act_postprocess4.0.project.0" in name: lowerCAmelCase__ : List[Any] = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' ) # resize blocks if "pretrained.act_postprocess1.3" in name: lowerCAmelCase__ : Optional[Any] = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' ) if "pretrained.act_postprocess1.4" in name: lowerCAmelCase__ : Tuple = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' ) if "pretrained.act_postprocess2.3" in name: lowerCAmelCase__ : Any = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' ) if "pretrained.act_postprocess2.4" in name: lowerCAmelCase__ : Optional[int] = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' ) if "pretrained.act_postprocess3.3" in name: lowerCAmelCase__ : int = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' ) if "pretrained.act_postprocess4.3" in name: lowerCAmelCase__ : int = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' ) if "pretrained.act_postprocess4.4" in name: lowerCAmelCase__ : List[str] = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' ) if "pretrained" in name: lowerCAmelCase__ : Union[str, Any] = name.replace('''pretrained''' , '''dpt''' ) if "bn" in name: lowerCAmelCase__ : List[Any] = name.replace('''bn''' , '''batch_norm''' ) if "head" in name: lowerCAmelCase__ : List[str] = name.replace('''head''' , '''head.head''' ) if "encoder.norm" in name: lowerCAmelCase__ : List[Any] = name.replace('''encoder.norm''' , '''layernorm''' ) if "auxlayer" in name: lowerCAmelCase__ : Dict = name.replace('''auxlayer''' , '''auxiliary_head.head''' ) return name def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: for i in 
range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase__ : Optional[int] = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" ) lowerCAmelCase__ : Optional[Any] = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase__ : Tuple = in_proj_weight[: config.hidden_size, :] lowerCAmelCase__ : Tuple = in_proj_bias[: config.hidden_size] lowerCAmelCase__ : Dict = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase__ : Dict = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase__ : Dict = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase__ : Optional[Any] = in_proj_bias[-config.hidden_size :] def __lowerCAmelCase ( ) -> Tuple: lowerCAmelCase__ : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' lowerCAmelCase__ : int = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ) return im @torch.no_grad() def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Any: lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = get_dpt_config(UpperCamelCase ) # load original state_dict from URL lowerCAmelCase__ : str = torch.hub.load_state_dict_from_url(UpperCamelCase , map_location='''cpu''' ) # remove certain keys remove_ignore_keys_(UpperCamelCase ) # rename keys for key in state_dict.copy().keys(): lowerCAmelCase__ : Tuple = state_dict.pop(UpperCamelCase ) lowerCAmelCase__ : Dict = val # read in qkv matrices read_in_q_k_v(UpperCamelCase , UpperCamelCase ) # load HuggingFace model lowerCAmelCase__ : str = DPTForSemanticSegmentation(UpperCamelCase ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(UpperCamelCase ) model.load_state_dict(UpperCamelCase ) model.eval() # Check outputs on an image lowerCAmelCase__ : Tuple = 480 if '''ade''' in checkpoint_url else 384 lowerCAmelCase__ : Tuple = DPTImageProcessor(size=UpperCamelCase ) lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : Tuple = image_processor(UpperCamelCase , return_tensors='''pt''' ) # forward pass lowerCAmelCase__ : List[Any] = model(**UpperCamelCase ).logits if '''ade''' in checkpoint_url else model(**UpperCamelCase ).predicted_depth # Assert logits lowerCAmelCase__ : Union[str, Any] = torch.tensor([[6.3199, 6.3629, 6.4148], [6.3850, 6.3615, 6.4166], [6.3519, 6.3176, 6.3575]] ) if "ade" in checkpoint_url: lowerCAmelCase__ : List[Any] = torch.tensor([[4.0480, 4.2420, 4.4360], [4.3124, 4.5693, 4.8261], [4.5768, 4.8965, 5.2163]] ) assert outputs.shape == torch.Size(UpperCamelCase ) assert ( torch.allclose(outputs[0, 0, :3, :3] , UpperCamelCase , atol=1E-4 ) if "ade" in checkpoint_url else torch.allclose(outputs[0, :3, :3] , UpperCamelCase ) ) Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase ) print(F"""Saving model to {pytorch_dump_folder_path}""" ) model.save_pretrained(UpperCamelCase ) print(F"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(UpperCamelCase ) if push_to_hub: print('''Pushing model to hub...''' ) model.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=UpperCamelCase , ) image_processor.push_to_hub( repo_path_or_name=Path(UpperCamelCase , UpperCamelCase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=UpperCamelCase , ) if 
__name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--checkpoint_url""", default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""", type=str, help="""URL of the original DPT checkpoint you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", ) parser.add_argument( """--model_name""", default="""dpt-large""", type=str, help="""Name of the model, in case you're pushing to the hub.""", ) lowerCAmelCase_ = parser.parse_args() convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_CAUSAL_LM_MAPPING A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __magic_name__( self ): lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id lowerCAmelCase__ : List[Any] = '''<pad>''' lowerCAmelCase__ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) 
return text_generator, ["This is a test", "Another test"] def __magic_name__( self ): lowerCAmelCase__ : Any = '''Hello I believe in''' lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = text_generator.model lowerCAmelCase__ : Optional[int] = text_generator.tokenizer lowerCAmelCase__ : Tuple = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase__ : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase__ : str = text_generator('''''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase__ : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch # Classic `model_kwargs` lowerCAmelCase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 ) def __magic_name__( self ): lowerCAmelCase__ : int = '''Hello world''' lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(__UpperCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCAmelCase , cl.out ) with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 ) self.assertNotIn(__UpperCAmelCase , cl.out )
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ): lowerCAmelCase__ : List[Any] = parent lowerCAmelCase__ : Any = batch_size lowerCAmelCase__ : Optional[Any] = image_size lowerCAmelCase__ : Optional[int] = num_channels lowerCAmelCase__ : Union[str, Any] = embeddings_size lowerCAmelCase__ : Optional[Any] = hidden_sizes lowerCAmelCase__ : str = depths lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : Optional[int] = use_labels lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : Union[str, Any] = num_labels lowerCAmelCase__ : str = scope lowerCAmelCase__ : Any = len(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : List[Any] = None if self.use_labels: lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase__ : Union[str, Any] = self.get_config() return config, pixel_values, labels def __magic_name__( self ): return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : List[str] = TFRegNetModel(config=__UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase , training=__UpperCAmelCase ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Tuple = self.num_labels lowerCAmelCase__ : str = TFRegNetForImageClassification(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = model(__UpperCAmelCase , labels=__UpperCAmelCase , training=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = config_and_inputs lowerCAmelCase__ : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_tf class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ): A__ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () A__ = ( {'feature-extraction': 
TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) A__ = False A__ = False A__ = False A__ = False A__ = False def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = TFRegNetModelTester(self ) lowerCAmelCase__ : List[Any] = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase ) def __magic_name__( self ): return @unittest.skip(reason='''RegNet does not use inputs_embeds''' ) def __magic_name__( self ): pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('''GPU''' ) ) == 0 , reason='''TF does not support backprop for grouped convolutions on CPU.''' , ) @slow def __magic_name__( self ): super().test_keras_fit() @unittest.skip(reason='''RegNet does not support input and output embeddings''' ) def __magic_name__( self ): pass def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Optional[int] = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Tuple = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Any = [*signature.parameters.keys()] lowerCAmelCase__ : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __magic_name__( self ): def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = model_class(__UpperCAmelCase ) lowerCAmelCase__ : str = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) , training=__UpperCAmelCase ) lowerCAmelCase__ : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowerCAmelCase__ : str = self.model_tester.num_stages self.assertEqual(len(__UpperCAmelCase ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : str = ['''basic''', '''bottleneck'''] for model_class in self.all_model_classes: for layer_type in layers_type: lowerCAmelCase__ : Union[str, Any] = layer_type lowerCAmelCase__ : str = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : Union[str, Any] = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase={} ): lowerCAmelCase__ : str = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ).to_tuple() def recursive_check(__UpperCAmelCase , __UpperCAmelCase ): if isinstance(__UpperCAmelCase , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in 
zip(__UpperCAmelCase , __UpperCAmelCase ): recursive_check(__UpperCAmelCase , __UpperCAmelCase ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(__UpperCAmelCase , __UpperCAmelCase ) ) , msg=( '''Tuple and dict output are not equal. Difference:''' f""" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}""" ) , ) recursive_check(__UpperCAmelCase , __UpperCAmelCase ) for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(__UpperCAmelCase ) lowerCAmelCase__ : str = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {'''output_hidden_states''': True} ) lowerCAmelCase__ : List[str] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) lowerCAmelCase__ : List[str] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {'''output_hidden_states''': True} ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def __magic_name__( self ): for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : Dict = TFRegNetModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __lowerCAmelCase ( ) -> Union[str, Any]: lowerCAmelCase__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_tf @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__( self ): return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __magic_name__( self ): lowerCAmelCase__ : Dict = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) lowerCAmelCase__ : Optional[Any] = self.default_image_processor lowerCAmelCase__ : Optional[int] = prepare_img() lowerCAmelCase__ : Optional[Any] = image_processor(images=__UpperCAmelCase , return_tensors='''tf''' ) # forward pass lowerCAmelCase__ : Tuple = model(**__UpperCAmelCase , training=__UpperCAmelCase ) # verify the logits lowerCAmelCase__ : str = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) lowerCAmelCase__ : Dict = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 )
def __lowerCAmelCase ( UpperCamelCase ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] )


def __lowerCAmelCase ( UpperCamelCase ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(UpperCamelCase ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
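# Round-trip sketch for the two helpers above (hypothetical names, since the
# originals are anonymized; hex values checked by hand):
#     base16_encode(b"Hello") -> "48656C6C6F"
#     base16_decode("48656C6C6F") -> b"Hello"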
import collections import os import re from pathlib import Path lowerCAmelCase_ = """src/transformers""" # Matches is_xxx_available() lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: lowerCAmelCase_ = re.compile(R"""^\s*try:""") # Catches a line with else: lowerCAmelCase_ = re.compile(R"""^\s*else:""") def __lowerCAmelCase ( UpperCamelCase ) -> int: if _re_test_backend.search(UpperCamelCase ) is None: return None lowerCAmelCase__ : int = [b[0] for b in _re_backend.findall(UpperCamelCase )] backends.sort() return "_and_".join(UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Any: with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase__ : Union[str, Any] = f.readlines() lowerCAmelCase__ : Tuple = 0 while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase__ : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase__ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCamelCase ): lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0] lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase ) if single_line_import_search is not None: lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase__ : Any = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase__ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ : Optional[int] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase__ : str = lines[line_index] if _re_import_struct_add_one.search(UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCamelCase ) is not None: lowerCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' ) lowerCAmelCase__ : List[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_between_brackets.search(UpperCamelCase ) is not None: lowerCAmelCase__ : List[Any] = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' ) lowerCAmelCase__ : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_quote_object.search(UpperCamelCase ) is not None: objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase__ : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase__ : Any = [] while ( line_index < len(UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase__ : Tuple = lines[line_index] lowerCAmelCase__ : List[Any] = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase__ : Dict = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase__ : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase__ : Any = lines[line_index] lowerCAmelCase__ : Union[str, Any] = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase__ : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]: def find_duplicates(UpperCamelCase ): return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase__ : Optional[Any] = [] for key in import_dict_objects.keys(): lowerCAmelCase__ : str = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) lowerCAmelCase__ : int = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase__ : List[Any] = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __lowerCAmelCase ( ) -> Optional[Any]: lowerCAmelCase__ : Dict = [] for root, _, files in os.walk(UpperCamelCase ): if "__init__.py" in files: lowerCAmelCase__ : Any = os.path.join(UpperCamelCase , '''__init__.py''' ) lowerCAmelCase__ : List[Any] = parse_init(UpperCamelCase ) if objects is not None: lowerCAmelCase__ : Optional[int] = analyze_results(*UpperCamelCase ) if len(UpperCamelCase ) > 0: lowerCAmelCase__ : Tuple = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(UpperCamelCase ) ) if len(UpperCamelCase ) > 0: raise ValueError('''\n\n'''.join(UpperCamelCase ) ) def __lowerCAmelCase ( ) -> Tuple: lowerCAmelCase__ : str = [] for path, directories, files in os.walk(UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase__ : Tuple = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) ) lowerCAmelCase__ : Dict = short_path.replace(os.path.sep , '''.''' ) submodules.append(UpperCamelCase ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase__ : 
Union[str, Any] = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) ) lowerCAmelCase__ : Tuple = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(UpperCamelCase ) return submodules lowerCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def __lowerCAmelCase ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import lowerCAmelCase__ : Dict = direct_transformers_import(UpperCamelCase ) lowerCAmelCase__ : int = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and # (potentially re-) add them. with open(os.path.join(UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f: lowerCAmelCase__ : str = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , UpperCamelCase ) ) ) lowerCAmelCase__ : Optional[int] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(UpperCamelCase ) > 0: lowerCAmelCase__ : List[Any] = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
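# Illustration (not part of the original script): find_backend collapses a
# backend-gated line into a normalized key and returns None for other lines,
# e.g.
#     find_backend("    if not is_torch_available():") -> "torch"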
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class _lowerCAmelCase ( _lowercase ): A__ = (DPMSolverSDEScheduler,) A__ = 10 def __magic_name__( self , **__UpperCAmelCase ): lowerCAmelCase__ : Dict = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**__UpperCAmelCase ) return config def __magic_name__( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def __magic_name__( self ): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def __magic_name__( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def __magic_name__( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = self.scheduler_classes[0] lowerCAmelCase__ : str = self.get_scheduler_config() lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Union[str, Any] = self.dummy_model() lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = output.prev_sample lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Optional[Any] = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = output.prev_sample lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) 
) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config() lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = output.prev_sample lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0] lowerCAmelCase__ : Dict = self.get_scheduler_config() lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self.dummy_model() lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase ) for t in scheduler.timesteps: lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Dict = output.prev_sample lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
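# How the one-liner above reproduces itself (an explanatory sketch): the
# template is formatted with its own repr, so %% collapses to % and %r
# re-inserts the quoted template, printing an equivalent form of the source:
#     template = 'print((lambda quine: quine %% quine)(%r))'
#     print(template % template)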
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = 3 lowerCAmelCase__ : Tuple = 250 lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length return input_ids, scores def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 ) lowerCAmelCase__ : List[str] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 ) lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(__UpperCAmelCase ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(__UpperCAmelCase ) , 1 )
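# Usage sketch mirroring the tests above (illustrative, not part of the file):
# a StoppingCriteriaList returns True as soon as any member criterion fires,
# e.g.
#     criteria = StoppingCriteriaList([MaxLengthCriteria(20), MaxTimeCriteria(1.0)])
#     criteria(input_ids, scores)  # -> bool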
def __lowerCAmelCase ( UpperCamelCase = 10 , UpperCamelCase = 22 ) -> int:
    lowerCAmelCase__ : Tuple = range(1 , UpperCamelCase )
    lowerCAmelCase__ : Tuple = range(1 , UpperCamelCase )
    return sum(
        1 for power in powers for base in bases if len(str(base**power ) ) == power
    )


if __name__ == "__main__":
    print(F"""{solution(10, 22) = }""")
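# Added reasoning (not in the original): base**power has exactly `power`
# digits only when 10 ** (power - 1) <= base ** power < 10 ** power; the
# right-hand bound forces base <= 9, so the default base range of 1..21 is
# more than wide enough.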
from functools import reduce lowerCAmelCase_ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowerCAmelCase ( UpperCamelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) ) for i in range(len(UpperCamelCase ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
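# An equivalent formulation (a sketch, not part of the original): the same
# maximum can be computed over integer digits with math.prod, avoiding the
# string round-trip inside reduce. It assumes the N constant defined above.
from math import prod


def solution_prod(n: str = N) -> int:
    # Product of every 13-digit window, taking the maximum.
    digits = [int(c) for c in n]
    return max(prod(digits[i : i + 13]) for i in range(len(digits) - 12))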
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCAmelCase_ = logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase ) -> List[str]: lowerCAmelCase__ : int = R'''\w+[.]\d+''' lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase ) for pat in pats: lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) ) return key def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowerCAmelCase__ : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any: # Step 1: Convert pytorch tensor to numpy lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) ) lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase ) lowerCAmelCase__ : List[Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase__ : str = rename_key(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # also add unexpected weight so that warning is thrown lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase ) return unflatten_dict(UpperCamelCase )
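# Worked example for rename_key above (made-up key; output derived from the
# regex by hand):
#     rename_key("down_blocks.0.resnets.1.conv1.weight")
#     -> "down_blocks_0.resnets_1.conv1.weight"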
import argparse import math import os import torch from neural_compressor.utils.pytorch import load from PIL import Image from transformers import CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel def __lowerCAmelCase ( ) -> Optional[Any]: lowerCAmelCase__ : List[str] = argparse.ArgumentParser() parser.add_argument( '''-m''' , '''--pretrained_model_name_or_path''' , type=UpperCamelCase , default=UpperCamelCase , required=UpperCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , ) parser.add_argument( '''-c''' , '''--caption''' , type=UpperCamelCase , default='''robotic cat with wings''' , help='''Text used to generate images.''' , ) parser.add_argument( '''-n''' , '''--images_num''' , type=UpperCamelCase , default=4 , help='''How many images to generate.''' , ) parser.add_argument( '''-s''' , '''--seed''' , type=UpperCamelCase , default=42 , help='''Seed for the random process.''' , ) parser.add_argument( '''-ci''' , '''--cuda_id''' , type=UpperCamelCase , default=0 , help='''cuda_id.''' , ) lowerCAmelCase__ : Dict = parser.parse_args() return args def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Tuple: if not len(UpperCamelCase ) == rows * cols: raise ValueError('''The specified number of rows and columns are not correct.''' ) lowerCAmelCase__ , lowerCAmelCase__ : List[str] = imgs[0].size lowerCAmelCase__ : Dict = Image.new('''RGB''' , size=(cols * w, rows * h) ) lowerCAmelCase__ , lowerCAmelCase__ : Dict = grid.size for i, img in enumerate(UpperCamelCase ): grid.paste(UpperCamelCase , box=(i % cols * w, i // cols * h) ) return grid def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase="robotic cat with wings" , UpperCamelCase=7.5 , UpperCamelCase=50 , UpperCamelCase=1 , UpperCamelCase=42 , ) -> Union[str, Any]: lowerCAmelCase__ : int = torch.Generator(pipeline.device ).manual_seed(UpperCamelCase ) lowerCAmelCase__ : List[str] = pipeline( UpperCamelCase , guidance_scale=UpperCamelCase , num_inference_steps=UpperCamelCase , generator=UpperCamelCase , num_images_per_prompt=UpperCamelCase , ).images lowerCAmelCase__ : Union[str, Any] = int(math.sqrt(UpperCamelCase ) ) lowerCAmelCase__ : str = image_grid(UpperCamelCase , rows=_rows , cols=num_images_per_prompt // _rows ) return grid, images lowerCAmelCase_ = parse_args() # Load models and create wrapper for stable diffusion lowerCAmelCase_ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""") lowerCAmelCase_ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""") lowerCAmelCase_ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""") lowerCAmelCase_ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""") lowerCAmelCase_ = StableDiffusionPipeline.from_pretrained( args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer ) lowerCAmelCase_ = lambda images, clip_input: (images, False) if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")): lowerCAmelCase_ = load(args.pretrained_model_name_or_path, model=unet) unet.eval() setattr(pipeline, """unet""", unet) else: lowerCAmelCase_ = unet.to(torch.device("""cuda""", args.cuda_id)) lowerCAmelCase_ = pipeline.to(unet.device) lowerCAmelCase_ , lowerCAmelCase_ = generate_images(pipeline, prompt=args.caption,
num_images_per_prompt=args.images_num, seed=args.seed) grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split())))) lowerCAmelCase_ = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split())) os.makedirs(dirname, exist_ok=True) for idx, image in enumerate(images): image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
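# Hypothetical invocation of this script (file name and model path are
# placeholders, not from the original):
#     python generate_images.py -m ./sd-checkpoint -c "robotic cat with wings" -n 4 -s 42 -ci 0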
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
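# Behavioral note (not code from this file): with _LazyModule, the modeling
# classes registered above are only imported when first accessed, so merely
# importing the package stays cheap.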
import os import sys import unittest lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, """utils""")) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path lowerCAmelCase_ = os.path.join(git_repo_path, """src""", """diffusers""") class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = find_backend(''' if not is_torch_available():''' ) self.assertEqual(__UpperCAmelCase , '''torch''' ) # backend_with_underscore = find_backend(" if not is_tensorflow_text_available():") # self.assertEqual(backend_with_underscore, "tensorflow_text") lowerCAmelCase__ : Tuple = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' ) self.assertEqual(__UpperCAmelCase , '''torch_and_transformers''' ) # double_backend_with_underscore = find_backend( # " if not (is_sentencepiece_available() and is_tensorflow_text_available()):" # ) # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text") lowerCAmelCase__ : Dict = find_backend( ''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' ) self.assertEqual(__UpperCAmelCase , '''torch_and_transformers_and_onnx''' ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , __UpperCAmelCase ) self.assertIn('''torch_and_transformers''' , __UpperCAmelCase ) self.assertIn('''flax_and_transformers''' , __UpperCAmelCase ) self.assertIn('''torch_and_transformers_and_onnx''' , __UpperCAmelCase ) # Likewise, we can't assert on the exact content of a key self.assertIn('''UNet2DModel''' , objects['''torch'''] ) self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] ) self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] ) self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] ) self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] ) self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] ) def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(__UpperCAmelCase , '''\nCONSTANT = None\n''' ) lowerCAmelCase__ : Tuple = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( __UpperCAmelCase , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) lowerCAmelCase__ : str = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, \'torch\') @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, \'torch\') ''' lowerCAmelCase__ : Optional[int] = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = '''# This file is autogenerated by the command `make fix-copies`, do not edit. 
from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) @classmethod def from_config(cls, *args, **kwargs): requires_backends(cls, ["torch"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["torch"]) ''' lowerCAmelCase__ : Any = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , __UpperCAmelCase )
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCAmelCase_ = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): @classmethod def __magic_name__( cls ): lowerCAmelCase__ : Dict = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def __magic_name__( cls ): try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __magic_name__( self ): lowerCAmelCase__ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) 
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : Any = True lowerCAmelCase__ : Any = flatten_dict(modela.params ) lowerCAmelCase__ : List[str] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowerCAmelCase__ : Optional[Any] = False return models_are_equal @require_flax class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Dict = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = '''bert''' lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = '''bert''' lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() lowerCAmelCase_ = 2 class _lowerCAmelCase : def __init__( self , *, # begin keyword-only arguments __UpperCAmelCase="<s>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase=None , ): lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = bos, unk, pad, eos lowerCAmelCase__ : int = [] lowerCAmelCase__ : Tuple = [] lowerCAmelCase__ : Dict = {} lowerCAmelCase__ : int = self.add_symbol(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = self.add_symbol(__UpperCAmelCase ) lowerCAmelCase__ : str = self.add_symbol(__UpperCAmelCase ) lowerCAmelCase__ : Any = self.add_symbol(__UpperCAmelCase ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = len(self.symbols ) def __eq__( self , __UpperCAmelCase ): return self.indices == other.indices def __getitem__( self , __UpperCAmelCase ): if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ): return len(self.symbols ) def __contains__( self , __UpperCAmelCase ): return sym in self.indices @classmethod def __magic_name__( cls , __UpperCAmelCase ): lowerCAmelCase__ : List[Any] = cls() d.add_from_file(__UpperCAmelCase ) return d def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase=1 , __UpperCAmelCase=False ): if word in self.indices and not overwrite: lowerCAmelCase__ : Union[str, Any] = self.indices[word] lowerCAmelCase__ : Tuple = self.count[idx] + n return idx else: lowerCAmelCase__ : str = len(self.symbols ) lowerCAmelCase__ : List[str] = idx self.symbols.append(__UpperCAmelCase ) self.count.append(__UpperCAmelCase ) return idx def __magic_name__( self , __UpperCAmelCase ): return 0 def __magic_name__( self , __UpperCAmelCase ): if isinstance(__UpperCAmelCase , __UpperCAmelCase ): try: with open(__UpperCAmelCase , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(__UpperCAmelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(__UpperCAmelCase ) ) return lowerCAmelCase__ : Dict = f.readlines() lowerCAmelCase__ : Optional[Any] = self._load_meta(__UpperCAmelCase ) for line in lines[indices_start_line:]: try: lowerCAmelCase__ , lowerCAmelCase__ : Any = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": lowerCAmelCase__ : Any = True lowerCAmelCase__ , lowerCAmelCase__ : List[str] = line.rsplit(''' ''' , 1 ) else: lowerCAmelCase__ : Optional[Any] = False lowerCAmelCase__ : List[str] = int(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. 
If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(__UpperCAmelCase ) ) self.add_symbol(__UpperCAmelCase , n=__UpperCAmelCase , overwrite=__UpperCAmelCase ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def __lowerCAmelCase ( UpperCamelCase ) -> List[Any]: # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} lowerCAmelCase__ : str = dict((re.sub(R'''@@$''' , '''''' , UpperCamelCase ), v) if k.endswith('''@@''' ) else (re.sub(R'''$''' , '''</w>''' , UpperCamelCase ), v) for k, v in d.items() ) lowerCAmelCase__ : Any = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[F"""{k}</w>"""] lowerCAmelCase__ : str = d[k] # restore return da def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]: # prep if not os.path.exists(UpperCamelCase ): raise ValueError(F"""path {biogpt_checkpoint_path} does not exist!""" ) os.makedirs(UpperCamelCase , exist_ok=UpperCamelCase ) print(F"""Writing results to {pytorch_dump_folder_path}""" ) # handle various types of models lowerCAmelCase__ : Tuple = os.path.join(UpperCamelCase , '''checkpoint.pt''' ) if not os.path.isfile(UpperCamelCase ): raise ValueError(F"""path to the file {checkpoint_file} does not exist!""" ) lowerCAmelCase__ : Any = torch.load(UpperCamelCase , map_location='''cpu''' ) lowerCAmelCase__ : Tuple = chkpt['''cfg''']['''model'''] # dicts lowerCAmelCase__ : int = os.path.join(UpperCamelCase , '''dict.txt''' ) if not os.path.isfile(UpperCamelCase ): raise ValueError(F"""path to the file {dict_file} does not exist!""" ) lowerCAmelCase__ : Tuple = Dictionary.load(UpperCamelCase ) lowerCAmelCase__ : int = rewrite_dict_keys(src_dict.indices ) lowerCAmelCase__ : Tuple = len(UpperCamelCase ) lowerCAmelCase__ : List[Any] = os.path.join(UpperCamelCase , VOCAB_FILES_NAMES['''vocab_file'''] ) print(F"""Generating {src_vocab_file} of {src_vocab_size} records""" ) with open(UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) ) # merges_file (bpecodes) lowerCAmelCase__ : Tuple = os.path.join(UpperCamelCase , '''bpecodes''' ) if not os.path.isfile(UpperCamelCase ): raise ValueError(F"""path to the file {bpecodes_file} does not exist!""" ) lowerCAmelCase__ : int = os.path.join(UpperCamelCase , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(UpperCamelCase , UpperCamelCase ) # model config lowerCAmelCase__ : List[str] = os.path.join(UpperCamelCase , '''config.json''' ) lowerCAmelCase__ : List[Any] = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.02, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1E-12, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': 
not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(F"""Generating {biogpt_model_config_file}""" ) with open(UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) ) # tokenizer config lowerCAmelCase__ : int = os.path.join(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = { '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 1024, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(F"""Generating {biogpt_tokenizer_config_file}""" ) with open(UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) ) # model lowerCAmelCase__ : List[str] = chkpt['''model'''] # remove unneeded keys lowerCAmelCase__ : Optional[int] = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase , UpperCamelCase ) lowerCAmelCase__ : List[str] = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): lowerCAmelCase__ : int = model_state_dict.pop(UpperCamelCase ) else: lowerCAmelCase__ : Dict = model_state_dict.pop(UpperCamelCase ) lowerCAmelCase__ : int = BioGptConfig.from_pretrained(UpperCamelCase ) lowerCAmelCase__ : Dict = BioGptForCausalLM(UpperCamelCase ) # check that it loads ok model_new.load_state_dict(UpperCamelCase ) # save lowerCAmelCase__ : List[str] = os.path.join(UpperCamelCase , UpperCamelCase ) print(F"""Generating {pytorch_weights_dump_path}""" ) torch.save(UpperCamelCase , UpperCamelCase ) print('''Conversion is done!''' ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--biogpt_checkpoint_path""", default=None, type=str, required=True, help=( """Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,""" """ bpecodes, etc.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowerCAmelCase_ = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
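As a usage sketch, the converter above is driven from the command line; the script name and both paths below are assumptions, not taken from this file.

# Hypothetical invocation of the conversion script (adjust the script name and paths to your checkout).
import subprocess

subprocess.run(
    [
        "python",
        "convert_biogpt_original_pytorch_checkpoint_to_pytorch.py",  # assumed script name
        "--biogpt_checkpoint_path", "/path/to/biogpt_checkpoint_dir",  # must contain checkpoint.pt, dict.txt, bpecodes
        "--pytorch_dump_folder_path", "/path/to/output_dir",
    ],
    check=True,
)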
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    """Quick sort a[start:end + 1] in place and return the number of comparisons."""
    count = 0
    if start < end:
        # Move a random pivot to the end before partitioning.
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition a[start:end + 1] around a random pivot; return (pivot index, comparisons)."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    """Return True if the sink t is reachable from s in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Run BFS-based augmentation (Edmonds-Karp), then return the saturated edges."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path.
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
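Since mincut mutates its graph argument in place (temp keeps the original capacities), a quick sanity check is to run it on a copy and sum the original capacities over the returned cut edges; by max-flow/min-cut duality this should match the maximum flow, which is 23 for this textbook network.

# Sketch: check the capacity of the cut found above against the known max flow.
graph_copy = [row[:] for row in test_graph]  # mincut consumes its input
cut_edges = mincut(graph_copy, source=0, sink=5)
cut_capacity = sum(test_graph[u][v] for u, v in cut_edges)
print(cut_capacity)  # expected: 23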
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: assert isinstance(UpperCamelCase , UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : str = features.copy() if features else default_expected_features lowerCAmelCase__ : List[Any] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: if issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = parquet_path elif issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = [parquet_path] lowerCAmelCase__ : int = tmp_path / '''cache''' lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , 
cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str: assert isinstance(UpperCamelCase , UpperCamelCase ) for split in splits: lowerCAmelCase__ : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features lowerCAmelCase__ : Optional[int] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: if split: lowerCAmelCase__ : Tuple = {split: parquet_path} else: lowerCAmelCase__ : int = '''train''' lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path} lowerCAmelCase__ : Optional[int] = tmp_path / '''cache''' lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) lowerCAmelCase__ : int = pf.read() assert dataset.data.table == output_table def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) 
lowerCAmelCase__ : Dict = {'''image''': [image_path]} lowerCAmelCase__ : int = Features({'''image''': Image()} ) lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase ) lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any: assert get_writer_batch_size(UpperCamelCase ) == expected
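A small usage sketch of the reader/writer pair these tests cover; the file and cache paths are placeholders.

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

dataset = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
assert ParquetDatasetWriter(dataset, "/tmp/foo.parquet").write() > 0  # returns bytes written
reloaded = ParquetDatasetReader("/tmp/foo.parquet", cache_dir="/tmp/cache").read()
assert reloaded.column_names == ["col_1", "col_2"]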
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : Dict = tempfile.mkdtemp() lowerCAmelCase__ : str = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''', ] lowerCAmelCase__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) lowerCAmelCase__ : int = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.48145466, 0.4578275, 0.40821073], '''image_std''': [0.26862954, 0.26130258, 0.27577711], } lowerCAmelCase__ : List[Any] = os.path.join(self.tmpdirname , __UpperCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self , **__UpperCAmelCase ): return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , **__UpperCAmelCase ): return BertTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , **__UpperCAmelCase ): return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self ): shutil.rmtree(self.tmpdirname ) def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCAmelCase__ : Dict = [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = self.get_tokenizer() lowerCAmelCase__ : Any = self.get_rust_tokenizer() lowerCAmelCase__ : Union[str, Any] = self.get_image_processor() lowerCAmelCase__ : str = AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowerCAmelCase__ : Any = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=__UpperCAmelCase ) lowerCAmelCase__ : int = AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowerCAmelCase__ : Dict = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , __UpperCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , __UpperCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , 
__UpperCAmelCase ) self.assertIsInstance(processor_fast.image_processor , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCAmelCase__ : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowerCAmelCase__ : List[Any] = self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) lowerCAmelCase__ : Optional[int] = AlignProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = self.get_image_processor() lowerCAmelCase__ : int = self.get_tokenizer() lowerCAmelCase__ : List[Any] = AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = self.prepare_image_inputs() lowerCAmelCase__ : str = image_processor(__UpperCAmelCase , return_tensors='''np''' ) lowerCAmelCase__ : List[str] = processor(images=__UpperCAmelCase , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self.get_image_processor() lowerCAmelCase__ : Dict = self.get_tokenizer() lowerCAmelCase__ : str = AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ : int = '''lower newer''' lowerCAmelCase__ : int = processor(text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = tokenizer(__UpperCAmelCase , padding='''max_length''' , max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __magic_name__( self ): lowerCAmelCase__ : str = self.get_image_processor() lowerCAmelCase__ : Any = self.get_tokenizer() lowerCAmelCase__ : Union[str, Any] = AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = '''lower newer''' lowerCAmelCase__ : Optional[int] = self.prepare_image_inputs() lowerCAmelCase__ : List[Any] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.get_image_processor() lowerCAmelCase__ : int = self.get_tokenizer() lowerCAmelCase__ : Any = AlignProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) lowerCAmelCase__ : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCAmelCase__ : List[Any] = processor.batch_decode(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = tokenizer.batch_decode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = self.get_image_processor() lowerCAmelCase__ : Tuple = self.get_tokenizer() lowerCAmelCase__ : List[Any] = AlignProcessor(tokenizer=__UpperCAmelCase , 
image_processor=__UpperCAmelCase ) lowerCAmelCase__ : Dict = '''lower newer''' lowerCAmelCase__ : Dict = self.prepare_image_inputs() lowerCAmelCase__ : Optional[int] = processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
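Outside the test harness, the processor is normally loaded from a checkpoint; "kakaobrain/align-base" is the public ALIGN checkpoint but is used here only as an illustration.

import numpy as np
from PIL import Image

from transformers import AlignProcessor

processor = AlignProcessor.from_pretrained("kakaobrain/align-base")  # illustrative checkpoint
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text="a photo of a cat", images=image, return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values', 'token_type_ids']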
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
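As a usage sketch, per-stage defaults can be overridden when the config is instantiated (the values below are arbitrary):

from transformers import FocalNetConfig

config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2])
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']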
import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=400 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=[0.5, 0.5, 0.5] , __UpperCAmelCase=None , ): lowerCAmelCase__ : Dict = size if size is not None else {'''shortest_edge''': 18} lowerCAmelCase__ : int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} lowerCAmelCase__ : Any = parent lowerCAmelCase__ : List[str] = batch_size lowerCAmelCase__ : List[Any] = num_channels lowerCAmelCase__ : Any = num_frames lowerCAmelCase__ : Optional[Any] = image_size lowerCAmelCase__ : Union[str, Any] = min_resolution lowerCAmelCase__ : Optional[Any] = max_resolution lowerCAmelCase__ : Union[str, Any] = do_resize lowerCAmelCase__ : Optional[Any] = size lowerCAmelCase__ : Optional[int] = do_normalize lowerCAmelCase__ : int = image_mean lowerCAmelCase__ : int = image_std lowerCAmelCase__ : Optional[Any] = crop_size def __magic_name__( self ): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = VivitImageProcessor if is_vision_available() else None def __magic_name__( self ): lowerCAmelCase__ : Dict = VivitImageProcessingTester(self ) @property def __magic_name__( self ): return self.image_processor_tester.prepare_image_processor_dict() def __magic_name__( self ): lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_mean''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''image_std''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_normalize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''do_center_crop''' ) ) self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) lowerCAmelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def __magic_name__( self ): # Initialize image_processing lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos lowerCAmelCase__ : List[str] = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase ) for video in video_inputs: self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertIsInstance(video[0] , Image.Image ) # Test not 
batched input lowerCAmelCase__ : Tuple = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase__ : Tuple = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __magic_name__( self ): # Initialize image_processing lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowerCAmelCase__ : Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase ) for video in video_inputs: self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertIsInstance(video[0] , np.ndarray ) # Test not batched input lowerCAmelCase__ : List[Any] = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase__ : str = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __magic_name__( self ): # Initialize image_processing lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowerCAmelCase__ : Optional[Any] = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase ) for video in video_inputs: self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase ) self.assertIsInstance(video[0] , torch.Tensor ) # Test not batched input lowerCAmelCase__ : int = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowerCAmelCase__ : Dict = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_videos.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
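A quick sketch of calling the processor on one synthetic channels-first video, mirroring the batching behavior the tests assert (the frame count and sizes are arbitrary):

import numpy as np

from transformers import VivitImageProcessor

processor = VivitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
video = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8) for _ in range(10)]  # 10 frames
pixel_values = processor(video, return_tensors="pt").pixel_values
print(pixel_values.shape)  # torch.Size([1, 10, 3, 18, 18]) -- a single video becomes a batch of size 1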
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""

_CITATION = """
@article{2020SciPy-NMeth,
author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
           Haberland, Matt and Reddy, Tyler and Cournapeau, David and
           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
           Kern, Robert and Larson, Eric and Carey, C J and
           Polat, Ilhan and Feng, Yu and Moore, Eric W. and
           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
           Harris, Charles R. and Archibald, Anne M. and
           Ribeiro, Antonio H. and Pedregosa, Fabian and
           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
journal = {Nature Methods},
year    = {2020},
volume  = {17},
pages   = {261--272},
adsurl  = {https://rdcu.be/b08Wh},
doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        # scipy's pearsonr is symmetric in its two arguments.
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers representing two points in the same
    n-dimensional space.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise TypeError/ValueError unless `point` is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    Version with a one-line summation.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
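A quick check that both variants agree (inputs chosen arbitrarily):

print(manhattan_distance([1, 1], [2, 2]))                     # 2.0
print(manhattan_distance_one_liner([1.5, -3.0], [2.5, 4.0]))  # 8.0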
from manim import * class _lowerCAmelCase ( _lowercase ): def __magic_name__( self ): lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 ) lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )] lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 ) lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Optional[Any] = [] for i, rect in enumerate(__UpperCAmelCase ): rect.set_stroke(__UpperCAmelCase ) lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 ) self.add(__UpperCAmelCase ) model_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 ) lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 ) target.move_to(__UpperCAmelCase ) ckpt_arr.append(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase__ : List[Any] = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : str = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 ) lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) ) lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Dict = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) ) self.play(*__UpperCAmelCase ) self.play(FadeOut(__UpperCAmelCase ) ) lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) ) self.play( FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , ) self.wait()
lowerCAmelCase_ = """ # Transformers installation ! pip install transformers datasets # To install from source instead of the last release, comment the command above and uncomment the following one. # ! pip install git+https://github.com/huggingface/transformers.git """ lowerCAmelCase_ = [{"""type""": """code""", """content""": INSTALL_CONTENT}] lowerCAmelCase_ = { """{processor_class}""": """FakeProcessorClass""", """{model_class}""": """FakeModelClass""", """{object_class}""": """FakeObjectClass""", }
import collections import os import re from pathlib import Path lowerCAmelCase_ = """src/transformers""" # Matches is_xxx_available() lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: lowerCAmelCase_ = re.compile(R"""^\s*try:""") # Catches a line with else: lowerCAmelCase_ = re.compile(R"""^\s*else:""") def __lowerCAmelCase ( UpperCamelCase ) -> int: if _re_test_backend.search(UpperCamelCase ) is None: return None lowerCAmelCase__ : int = [b[0] for b in _re_backend.findall(UpperCamelCase )] backends.sort() return "_and_".join(UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Any: with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase__ : Union[str, Any] = f.readlines() lowerCAmelCase__ : Tuple = 0 while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase__ : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase__ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCamelCase ): lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0] lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase ) if single_line_import_search is not None: lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase__ : Any = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is
    # missing some optional dependencies, they may not have all of them. Thus we read the init to read all
    # additions and (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
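# Usage sketch (assumptions: this script lives at utils/check_inits.py in a
# transformers checkout and PATH_TO_TRANSFORMERS is defined near the top of the
# file, above this excerpt). Run it from the repository root:
#
#     python utils/check_inits.py
#
# It is silent when every __init__.py's _import_structure and TYPE_CHECKING
# halves agree, and raises ValueError listing the mismatches otherwise.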
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
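# Minimal usage sketch for the processor above, assuming it is the CLIP image
# processor published as `transformers.CLIPImageProcessor` (the input image is
# synthetic, so no files are needed):
#
#     import numpy as np
#     from transformers import CLIPImageProcessor
#
#     processor = CLIPImageProcessor()  # resize shortest edge to 224, center-crop 224x224
#     image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)
#     batch = processor(images=image, return_tensors="np")
#     print(batch["pixel_values"].shape)  # (1, 3, 224, 224)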
import math
import os
import unittest

from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        MegatronBertForCausalLM,
        MegatronBertForMaskedLM,
        MegatronBertForMultipleChoice,
        MegatronBertForNextSentencePrediction,
        MegatronBertForPreTraining,
        MegatronBertForQuestionAnswering,
        MegatronBertForSequenceClassification,
        MegatronBertForTokenClassification,
        MegatronBertModel,
    )


class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    # test_resize_embeddings = False
    test_head_masking = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
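# To exercise the suite above (assuming the usual transformers test layout):
#
#     python -m pytest tests/models/megatron_bert/test_modeling_megatron_bert.py -k test_megatron_bert_model
#
# The integration test stays skipped because the checkpoint is not hosted on the
# Hub; set MYDIR to a directory holding a local nvidia/megatron-bert-uncased-345m
# copy and remove the skip to run it.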
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
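# The from_dict test above pins down how `size` is normalized; the same rules
# should apply at construction time, since from_dict routes kwargs into __init__
# (sketch, assuming transformers' DonutImageProcessor):
#
#     from transformers import DonutImageProcessor
#
#     DonutImageProcessor(size=42).size        # {"height": 42, "width": 42}
#     DonutImageProcessor(size=(42, 84)).size  # {"height": 84, "width": 42}, legacy (width, height) order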
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional

from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging


logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}


class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        num_labels=3,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            num_labels=num_labels,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )


class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
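# Dummy-input sketch for the ONNX config above, assuming the upstream BART
# classes (fetching the tokenizer needs network access):
#
#     from transformers import BartConfig, BartTokenizer, TensorType
#     from transformers.models.bart.configuration_bart import BartOnnxConfig
#
#     onnx_config = BartOnnxConfig(BartConfig(), task="default")
#     tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
#     dummy = onnx_config.generate_dummy_inputs(
#         tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#     )
#     print(sorted(dummy.keys()))  # attention_mask, decoder_attention_mask, decoder_input_ids, input_ids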
import argparse
import re

import requests
import torch

# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode

from transformers import (
    BertTokenizer,
    BlipConfig,
    BlipForConditionalGeneration,
    BlipForImageTextRetrieval,
    BlipForQuestionAnswering,
)


def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image


def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key


@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)
    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)
    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)

    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=35,
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
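# Example invocation (assumption: the file is saved as
# convert_blip_original_pytorch_to_hf.py and the cloned BLIP repo is on
# PYTHONPATH, as the `models.*` imports above require):
#
#     python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base
#
# The checkpoint URLs are hardcoded inside convert_blip_checkpoint, so only the
# output folder (and optionally --config_path) has to be supplied.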
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
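# The closing property is the feature extractor's total downsampling factor:
# the product of the convolutional strides. Sketch, assuming this is the
# SEWDConfig shipped with transformers:
#
#     from transformers import SEWDConfig
#
#     config = SEWDConfig()
#     print(config.inputs_to_logits_ratio)  # 320 == 5 * 2**6 from (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)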
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
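# The special-token layout implemented above, shown concretely (assuming the
# moussaKam/barthez checkpoint is reachable on the Hub):
#
#     from transformers import BarthezTokenizerFast
#
#     tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     tok.build_inputs_with_special_tokens([10, 11])    # [cls, 10, 11, sep]
#     tok.build_inputs_with_special_tokens([10], [20])  # [cls, 10, sep, sep, 20, sep]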
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )


@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        raw_input_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(raw_input_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1],
        )
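
# Usage sketch tying the id constants above together (hedged: it requires the
# "google/pegasus-large" checkpoint; the ids are exactly those asserted in
# test_large_tokenizer_settings, so regular SentencePiece pieces start after a
# reserved block of size `offset`):
def _demo_pegasus_special_ids() -> None:
    tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
    assert tok.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    assert tok.unk_token_id == tok.offset + 2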
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch

from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch


@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                framework = FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                framework = FeaturesManager.determine_framework(self.test_model)
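
# The tests above pin down a priority order that, under my reading, amounts to
# the following decision sketch. This is a hypothetical re-implementation for
# illustration, not the actual FeaturesManager code:
def _sketch_determine_framework(
    model: str,
    framework: str = None,
    *,
    is_local: bool = False,
    has_pt_weights: bool = False,
    has_tf_weights: bool = False,
    torch_ok: bool = True,
    tf_ok: bool = True,
) -> str:
    if framework is not None:
        return framework  # 1. an explicit user choice always wins
    if is_local:
        if has_pt_weights:
            return "pt"  # 2. otherwise, local checkpoint contents decide
        if has_tf_weights:
            return "tf"
        raise FileNotFoundError(model)
    if torch_ok:
        return "pt"  # 3. finally, fall back to the environment, PyTorch first
    if tf_ok:
        return "tf"
    raise EnvironmentError("Neither PyTorch nor TensorFlow found in environment")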
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
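
# Small sketch of how hidden_size follows from embed_dim and depths for the
# default config (illustrative only): channels double at each of the
# len(depths) - 1 downsampling stages.
def _demo_donut_swin_hidden_size() -> None:
    config = DonutSwinConfig()  # embed_dim=96, depths=[2, 2, 6, 2]
    assert config.hidden_size == 96 * 2 ** 3 == 768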
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
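
# Sketch of the normalization checked in the last test above: per-example
# zero-mean/unit-variance scaling. This is a minimal NumPy re-derivation, not
# the library implementation; the small epsilon for numerical stability is an
# assumption of this sketch.
def _sketch_zero_mean_unit_var(x: np.ndarray, eps: float = 1e-7) -> np.ndarray:
    return (x - x.mean()) / np.sqrt(x.var() + eps)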
UNIT_SYMBOL = {
    "meter": "m",
    "kilometer": "km",
    "megametre": "Mm",
    "gigametre": "Gm",
    "terametre": "Tm",
    "petametre": "Pm",
    "exametre": "Em",
    "zettametre": "Zm",
    "yottametre": "Ym",
}

# Exponent of the factor (meter)
METRIC_CONVERSION = {
    "m": 0,
    "km": 3,
    "Mm": 6,
    "Gm": 9,
    "Tm": 12,
    "Pm": 15,
    "Em": 18,
    "Zm": 21,
    "Ym": 24,
}


def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
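
# Quick sanity check for length_conversion (a local helper using only the
# definitions above; expected values are hand-computed as value * 10**diff):
def _demo_length_conversion() -> None:
    assert length_conversion(4, "kilometers", "meters") == 4000  # 4 * 10**3
    assert length_conversion(1, "meter", "kilometer") == 0.001   # 1 * 10**-3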
import sys

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def str_eval(s: str) -> int:
    """Returns the product of the digits in the string s."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f"{solution() = }")
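
# A brute-force cross-check for solution(): scan every 13-digit window
# directly. Slower but obviously correct, so it is handy for validating the
# window-skipping logic above (helper name is local to this sketch):
def _solution_bruteforce(n: str = N) -> int:
    return max(str_eval(n[i : i + 13]) for i in range(len(n) - 12))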
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification",
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])

        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf"
        )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )

        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
                [{"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}],
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification",
            model="openai/clip-vit-base-patch32",
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )

    @slow
    @require_tf
    def test_large_model_tf(self):
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf"
        )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )

        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
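
# Minimal usage sketch for the pipeline exercised above (hedged: it assumes
# the checkpoint and the local fixture image are available; the output shape
# mirrors what the asserts check, a score-sorted list of dicts):
def _demo_zero_shot_image_classification() -> None:
    classifier = pipeline(task="zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    preds = classifier(image, candidate_labels=["cat", "plane", "remote"])
    # preds is a list of {"score": float, "label": str}, highest score first
    assert preds[0]["score"] >= preds[-1]["score"]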
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
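
# Usage sketch: the defaults reproduce the ViT-B/16 geometry, so the number of
# patch tokens per image is (image_size / patch_size)**2 (illustrative check):
def _demo_vit_msn_config() -> None:
    config = ViTMSNConfig()
    assert (config.image_size // config.patch_size) ** 2 == 196  # 14 * 14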
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
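
# By max-flow/min-cut duality the capacity of a minimum cut equals the maximum
# flow (23 for test_graph, the classic CLRS example network). The helper below
# just shows the call shape; note that mincut() mutates the graph it is given,
# so pass a copy if you still need the capacities afterwards:
def _demo_mincut() -> None:
    graph = [row[:] for row in test_graph]
    saturated = mincut(graph, source=0, sink=5)
    # every returned pair (u, v) is an edge whose capacity was fully used
    assert all(test_graph[u][v] > 0 for u, v in saturated)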
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
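
# Example invocation (a sketch: the flag names are generated by
# HfArgumentParser from the dataclass fields above, but the script filename,
# checkpoint, and data directory are placeholders, not values from this repo):
#
#   python finetune_trainer.py \
#       --model_name_or_path t5-small \
#       --data_dir ./my_seq2seq_data \
#       --output_dir ./output \
#       --task summarization \
#       --do_train --do_eval \
#       --predict_with_generate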
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_CAUSAL_LM_MAPPING A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __magic_name__( self ): lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id lowerCAmelCase__ : List[Any] = '''<pad>''' lowerCAmelCase__ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) 
return text_generator, ["This is a test", "Another test"] def __magic_name__( self ): lowerCAmelCase__ : Any = '''Hello I believe in''' lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = text_generator.model lowerCAmelCase__ : Optional[int] = text_generator.tokenizer lowerCAmelCase__ : Tuple = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase__ : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase__ : str = text_generator('''''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase__ : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch # Classic `model_kwargs` lowerCAmelCase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 ) def __magic_name__( self ): lowerCAmelCase__ : int = '''Hello world''' lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beginning of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(__UpperCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCAmelCase , cl.out ) with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 ) self.assertNotIn(__UpperCAmelCase , cl.out )
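The tests above exercise the text-generation pipeline's user-facing arguments (stop_sequence, return_full_text, batching, and the max_length/max_new_tokens warning). A minimal usage sketch of those same knobs, assuming only the public pipeline API and the tiny test checkpoint already used above:

from transformers import pipeline

generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")

# max_new_tokens counts only generated tokens, so it composes better with
# prompts of unknown length than max_length (which counts prompt + output).
out = generator("Hello I believe in", max_new_tokens=5)
print(out[0]["generated_text"])

# stop_sequence truncates the completion at the first occurrence of the string.
out = generator("Hello I believe in", stop_sequence=" fe")
print(out[0]["generated_text"])  # "Hello I believe in fe", per the test above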
from ..utils import DummyObject, requires_backends class _lowerCAmelCase ( metaclass=_lowercase ): A__ = ['torch', 'torchsde'] def __init__( self , *__UpperCAmelCase , **__UpperCAmelCase ): requires_backends(self , ['''torch''', '''torchsde'''] ) @classmethod def __magic_name__( cls , *__UpperCAmelCase , **__UpperCAmelCase ): requires_backends(cls , ['''torch''', '''torchsde'''] ) @classmethod def __magic_name__( cls , *__UpperCAmelCase , **__UpperCAmelCase ): requires_backends(cls , ['''torch''', '''torchsde'''] )
def __lowerCAmelCase ( UpperCamelCase ) -> str: return "".join([hex(UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase )] ) def __lowerCAmelCase ( UpperCamelCase ) -> bytes: # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(UpperCamelCase ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(UpperCamelCase ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
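Because the dump's renaming leaves both helpers above with the same mangled name, here is a self-contained sketch of the base16 encode/decode pair they implement; base16_encode and base16_decode are hypothetical names chosen for clarity:

def base16_encode(data: bytes) -> str:
    # One uppercase hex pair per byte, per RFC 3548 section 6.
    return "".join(hex(byte)[2:].zfill(2).upper() for byte in data)

def base16_decode(data: str) -> bytes:
    # Mirrors the validity checks above: even length, uppercase hex only.
    if len(data) % 2 != 0 or not set(data) <= set("0123456789ABCDEF"):
        raise ValueError("invalid base16 data")
    return bytes(int(data[i : i + 2], 16) for i in range(0, len(data), 2))

assert base16_encode(b"Hello") == "48656C6C6F"
assert base16_decode("48656C6C6F") == b"Hello"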
import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def __lowerCAmelCase ( UpperCamelCase ) -> List[str]: lowerCAmelCase__ : Tuple = {} lowerCAmelCase__ : Optional[int] = tokenizer(example['''content'''] , truncation=UpperCamelCase )['''input_ids'''] lowerCAmelCase__ : Union[str, Any] = len(example['''content'''] ) / len(output['''input_ids'''] ) return output lowerCAmelCase_ = HfArgumentParser(PretokenizationArguments) lowerCAmelCase_ = parser.parse_args() if args.num_workers is None: lowerCAmelCase_ = multiprocessing.cpu_count() lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir) lowerCAmelCase_ = time.time() lowerCAmelCase_ = load_dataset(args.dataset_name, split="""train""") print(F"""Dataset loaded in {time.time()-t_start:.2f}s""") lowerCAmelCase_ = time.time() lowerCAmelCase_ = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ """repo_name""", """path""", """copies""", """size""", """content""", """license""", """hash""", """line_mean""", """line_max""", """alpha_frac""", """autogenerated""", ], ) print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""") lowerCAmelCase_ = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
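The ratio field computed by the map function above is characters per token, a rough compression measure for the tokenizer on the corpus. A quick standalone sketch of the same measurement (the gpt2 checkpoint is only a stand-in for args.tokenizer_dir):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("gpt2")  # stand-in for args.tokenizer_dir
text = "def add(a, b):\n    return a + b\n"
ids = tok(text, truncation=True)["input_ids"]
print(len(text) / len(ids))  # chars per token; higher means better compression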
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class _lowerCAmelCase ( _lowercase ): A__ = (DPMSolverSDEScheduler,) A__ = 10 def __magic_name__( self , **__UpperCAmelCase ): lowerCAmelCase__ : Dict = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**__UpperCAmelCase ) return config def __magic_name__( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def __magic_name__( self ): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def __magic_name__( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def __magic_name__( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = self.scheduler_classes[0] lowerCAmelCase__ : str = self.get_scheduler_config() lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Union[str, Any] = self.dummy_model() lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = output.prev_sample lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Optional[Any] = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = output.prev_sample lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) 
) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config() lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = output.prev_sample lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0] lowerCAmelCase__ : Dict = self.get_scheduler_config() lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self.dummy_model() lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase ) for t in scheduler.timesteps: lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Dict = output.prev_sample lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
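Stripped of the assertions, the loop these tests repeat is the standard diffusers scheduler interface. A minimal sketch, with a zero tensor standing in for the UNet's noise prediction:

import torch
from diffusers import DPMSolverSDEScheduler

scheduler = DPMSolverSDEScheduler(num_train_timesteps=1100, noise_sampler_seed=0)
scheduler.set_timesteps(10)  # num_inference_steps, as in the tests

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # a real pipeline calls the UNet here
    sample = scheduler.step(noise_pred, t, sample).prev_sample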
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _lowerCAmelCase ( _lowercase ): A__ = 'bart' A__ = ['past_key_values'] A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : int = d_model lowerCAmelCase__ : str = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Dict = encoder_attention_heads lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim lowerCAmelCase__ : Union[str, Any] = decoder_layers lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Tuple = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] = activation_function lowerCAmelCase__ : Union[str, Any] = init_std lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Optional[int] = classifier_dropout lowerCAmelCase__ : str = use_cache lowerCAmelCase__ : int = encoder_layers lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): lowerCAmelCase__ : str = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
""" '''The config can simply be saved and uploaded again to be fixed.''' ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ : Any = {0: '''batch'''} lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Dict = super().outputs else: lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs lowerCAmelCase__ : int = seq_length if not self.use_past else 1 lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1] lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads lowerCAmelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : 
List[Any] = decoder_seq_length + 3 lowerCAmelCase__ : Any = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : List[str] = seqlen + 2 lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads lowerCAmelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype lowerCAmelCase__ : List[Any] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[Any] = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase__ : Tuple = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) lowerCAmelCase__ : int = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
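The ONNX config subclass above is renamed by the dump; in transformers it corresponds to BartOnnxConfig in configuration_bart.py, which is assumed in this sketch of inspecting the dynamic axes it declares:

from transformers import BartConfig
from transformers.models.bart.configuration_bart import BartOnnxConfig  # assumed identity

config = BartConfig()  # defaults match the __init__ above
onnx_config = BartOnnxConfig(config, task="default")
print(onnx_config.inputs)   # OrderedDict of dynamic-axis specs for input_ids etc.
print(onnx_config.outputs)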
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = 3 lowerCAmelCase__ : Tuple = 250 lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length return input_ids, scores def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 ) lowerCAmelCase__ : List[str] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 ) lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(__UpperCAmelCase ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(__UpperCAmelCase ) , 1 )
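A sketch of wiring the criteria exercised above into actual decoding; generate accepts a StoppingCriteriaList, and generation stops as soon as any criterion fires:

from transformers.generation import (
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteriaList,
)

criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=50),  # stop once sequences reach 50 tokens
        MaxTimeCriteria(max_time=2.0),     # ...or after two seconds of decoding
    ]
)
# output_ids = model.generate(input_ids, stopping_criteria=criteria)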
import dataclasses import re import string from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple import numpy as np from . import residue_constants lowerCAmelCase_ = Mapping[str, np.ndarray] lowerCAmelCase_ = Mapping[str, Any] # Is a nested dict. lowerCAmelCase_ = 0.01 @dataclasses.dataclass(frozen=_lowercase ) class _lowerCAmelCase : A__ = 42 # [num_res, num_atom_type, 3] # Amino-acid type for each residue represented as an integer between 0 and # 20, where 20 is 'X'. A__ = 42 # [num_res] # Binary float mask to indicate presence of a particular atom. 1.0 if an atom # is present and 0.0 if not. This should be used for loss masking. A__ = 42 # [num_res, num_atom_type] # Residue index as used in PDB. It is not necessarily continuous or 0-indexed. A__ = 42 # [num_res] # B-factors, or temperature factors, of each residue (in sq. angstroms units), # representing the displacement of the residue from its ground truth mean # value. A__ = 42 # [num_res, num_atom_type] # Chain indices for multi-chain predictions A__ = None # Optional remark about the protein. Included as a comment in output PDB # files A__ = None # Templates used to generate this protein (prediction-only) A__ = None # Chain corresponding to each parent A__ = None def __lowerCAmelCase ( UpperCamelCase ) -> Protein: lowerCAmelCase__ : int = R'''(\[[A-Z]+\]\n)''' lowerCAmelCase__ : List[str] = [tag.strip() for tag in re.split(UpperCamelCase , UpperCamelCase ) if len(UpperCamelCase ) > 0] lowerCAmelCase__ : Iterator[Tuple[str, List[str]]] = zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] ) lowerCAmelCase__ : List[str] = ["N", "CA", "C"] lowerCAmelCase__ : int = None lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : Dict = None for g in groups: if "[PRIMARY]" == g[0]: lowerCAmelCase__ : Dict = g[1][0].strip() for i in range(len(UpperCamelCase ) ): if seq[i] not in residue_constants.restypes: lowerCAmelCase__ : List[str] = '''X''' # FIXME: strings are immutable lowerCAmelCase__ : Any = np.array( [residue_constants.restype_order.get(UpperCamelCase , residue_constants.restype_num ) for res_symbol in seq] ) elif "[TERTIARY]" == g[0]: lowerCAmelCase__ : List[List[float]] = [] for axis in range(3 ): tertiary.append(list(map(UpperCamelCase , g[1][axis].split() ) ) ) lowerCAmelCase__ : Dict = np.array(UpperCamelCase ) lowerCAmelCase__ : Tuple = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa ) for i, atom in enumerate(UpperCamelCase ): lowerCAmelCase__ : Union[str, Any] = np.transpose(tertiary_np[:, i::3] ) atom_positions *= PICO_TO_ANGSTROM elif "[MASK]" == g[0]: lowerCAmelCase__ : Any = np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) ) lowerCAmelCase__ : Union[str, Any] = np.zeros( ( len(UpperCamelCase ), residue_constants.atom_type_num, ) ).astype(np.floataa ) for i, atom in enumerate(UpperCamelCase ): lowerCAmelCase__ : Any = 1 atom_mask *= mask[..., None] assert aatype is not None return Protein( atom_positions=UpperCamelCase , atom_mask=UpperCamelCase , aatype=UpperCamelCase , residue_index=np.arange(len(UpperCamelCase ) ) , b_factors=UpperCamelCase , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase = 0 ) -> List[str]: lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Optional[Any] = prot.remark if remark is not None: pdb_headers.append(F"""REMARK {remark}""" ) lowerCAmelCase__ : int = prot.parents lowerCAmelCase__ : Any = prot.parents_chain_index if parents is not None and parents_chain_index is not None: lowerCAmelCase__ : int 
= [p for i, p in zip(UpperCamelCase , UpperCamelCase ) if i == chain_id] if parents is None or len(UpperCamelCase ) == 0: lowerCAmelCase__ : Optional[Any] = ['''N/A'''] pdb_headers.append(F"""PARENT {" ".join(UpperCamelCase )}""" ) return pdb_headers def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> str: lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Optional[Any] = pdb_str.split('''\n''' ) lowerCAmelCase__ : Any = prot.remark if remark is not None: out_pdb_lines.append(F"""REMARK {remark}""" ) lowerCAmelCase__ : List[List[str]] if prot.parents is not None and len(prot.parents ) > 0: lowerCAmelCase__ : Optional[Any] = [] if prot.parents_chain_index is not None: lowerCAmelCase__ : Dict[str, List[str]] = {} for p, i in zip(prot.parents , prot.parents_chain_index ): parent_dict.setdefault(str(UpperCamelCase ) , [] ) parent_dict[str(UpperCamelCase )].append(UpperCamelCase ) lowerCAmelCase__ : List[str] = max([int(UpperCamelCase ) for chain_idx in parent_dict] ) for i in range(max_idx + 1 ): lowerCAmelCase__ : str = parent_dict.get(str(UpperCamelCase ) , ['''N/A'''] ) parents_per_chain.append(UpperCamelCase ) else: parents_per_chain.append(list(prot.parents ) ) else: lowerCAmelCase__ : Dict = [['''N/A''']] def make_parent_line(UpperCamelCase ) -> str: return F"""PARENT {" ".join(UpperCamelCase )}""" out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) ) lowerCAmelCase__ : Tuple = 0 for i, l in enumerate(UpperCamelCase ): if "PARENT" not in l and "REMARK" not in l: out_pdb_lines.append(UpperCamelCase ) if "TER" in l and "END" not in lines[i + 1]: chain_counter += 1 if not chain_counter >= len(UpperCamelCase ): lowerCAmelCase__ : Union[str, Any] = parents_per_chain[chain_counter] else: lowerCAmelCase__ : str = ['''N/A'''] out_pdb_lines.append(make_parent_line(UpperCamelCase ) ) return "\n".join(UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> str: lowerCAmelCase__ : str = residue_constants.restypes + ['''X'''] def res_atoa(UpperCamelCase ) -> str: return residue_constants.restype_atoa.get(restypes[r] , '''UNK''' ) lowerCAmelCase__ : Any = residue_constants.atom_types lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Any = prot.atom_mask lowerCAmelCase__ : str = prot.aatype lowerCAmelCase__ : Dict = prot.atom_positions lowerCAmelCase__ : str = prot.residue_index.astype(np.intaa ) lowerCAmelCase__ : Dict = prot.b_factors lowerCAmelCase__ : Optional[Any] = prot.chain_index if np.any(aatype > residue_constants.restype_num ): raise ValueError('''Invalid aatypes.''' ) lowerCAmelCase__ : Tuple = get_pdb_headers(UpperCamelCase ) if len(UpperCamelCase ) > 0: pdb_lines.extend(UpperCamelCase ) lowerCAmelCase__ : str = aatype.shape[0] lowerCAmelCase__ : Tuple = 1 lowerCAmelCase__ : int = 0 lowerCAmelCase__ : str = string.ascii_uppercase lowerCAmelCase__ : str = None # Add all atom sites. for i in range(UpperCamelCase ): lowerCAmelCase__ : List[Any] = res_atoa(aatype[i] ) for atom_name, pos, mask, b_factor in zip(UpperCamelCase , atom_positions[i] , atom_mask[i] , b_factors[i] ): if mask < 0.5: continue lowerCAmelCase__ : List[str] = '''ATOM''' lowerCAmelCase__ : Dict = atom_name if len(UpperCamelCase ) == 4 else F""" {atom_name}""" lowerCAmelCase__ : Union[str, Any] = '''''' lowerCAmelCase__ : List[Any] = '''''' lowerCAmelCase__ : Any = 1.00 lowerCAmelCase__ : str = atom_name[0] # Protein supports only C, N, O, S, this works. 
lowerCAmelCase__ : Dict = '''''' lowerCAmelCase__ : str = '''A''' if chain_index is not None: lowerCAmelCase__ : Tuple = chain_tags[chain_index[i]] # PDB is a columnar format, every space matters here! lowerCAmelCase__ : Optional[Any] = ( F"""{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}""" F"""{res_name_a:>3} {chain_tag:>1}""" F"""{residue_index[i]:>4}{insertion_code:>1} """ F"""{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}""" F"""{occupancy:>6.2f}{b_factor:>6.2f} """ F"""{element:>2}{charge:>2}""" ) pdb_lines.append(UpperCamelCase ) atom_index += 1 lowerCAmelCase__ : Optional[Any] = i == n - 1 if chain_index is not None: if i != n - 1 and chain_index[i + 1] != prev_chain_index: lowerCAmelCase__ : List[Any] = True lowerCAmelCase__ : Dict = chain_index[i + 1] if should_terminate: # Close the chain. lowerCAmelCase__ : Optional[int] = '''TER''' lowerCAmelCase__ : List[Any] = ( F"""{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}""" ) pdb_lines.append(UpperCamelCase ) atom_index += 1 if i != n - 1: # "prev" is a misnomer here. This happens at the beginning of # each new chain. pdb_lines.extend(get_pdb_headers(UpperCamelCase , UpperCamelCase ) ) pdb_lines.append('''END''' ) pdb_lines.append('''''' ) return "\n".join(UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> np.ndarray: return residue_constants.STANDARD_ATOM_MASK[prot.aatype] def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , UpperCamelCase = None , ) -> Protein: return Protein( aatype=features['''aatype'''] , atom_positions=result['''final_atom_positions'''] , atom_mask=result['''final_atom_mask'''] , residue_index=features['''residue_index'''] + 1 , b_factors=b_factors if b_factors is not None else np.zeros_like(result['''final_atom_mask'''] ) , chain_index=UpperCamelCase , remark=UpperCamelCase , parents=UpperCamelCase , parents_chain_index=UpperCamelCase , )
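A hypothetical round-trip using the module above. Its public names are mangled in this dump; from_proteinnet_string and to_pdb are assumed identities (the parser reads the [PRIMARY]/[TERTIARY]/[MASK] blocks, the writer emits one ATOM record per unmasked atom):

with open("example.proteinnet") as f:        # hypothetical input file
    prot = from_proteinnet_string(f.read())  # assumed name of the parser above
pdb_text = to_pdb(prot)                      # assumed name of the PDB writer above
with open("example.pdb", "w") as f:
    f.write(pdb_text)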
from functools import reduce lowerCAmelCase_ = ( """73167176531330624919225119674426574742355349194934""" """96983520312774506326239578318016984801869478851843""" """85861560789112949495459501737958331952853208805511""" """12540698747158523863050715693290963295227443043557""" """66896648950445244523161731856403098711121722383113""" """62229893423380308135336276614282806444486645238749""" """30358907296290491560440772390713810515859307960866""" """70172427121883998797908792274921901699720888093776""" """65727333001053367881220235421809751254540594752243""" """52584907711670556013604839586446706324415722155397""" """53697817977846174064955149290862569321978468622482""" """83972241375657056057490261407972968652414535100474""" """82166370484403199890008895243450658541227588666881""" """16427171479924442928230863465674813919123162824586""" """17866458359124566529476545682848912883142607690042""" """24219022671055626321111109370544217506941658960408""" """07198403850962455444362981230987879927244284909188""" """84580156166097919133875499200524063689912560717606""" """05886116467109405077541002256983155200055935729725""" """71636269561882670428252483600823257530420752963450""" ) def __lowerCAmelCase ( UpperCamelCase = N ) -> int: return max( # mypy cannot properly interpret reduce int(reduce(lambda UpperCamelCase , UpperCamelCase : str(int(UpperCamelCase ) * int(UpperCamelCase ) ) , n[i : i + 13] ) ) for i in range(len(UpperCamelCase ) - 12 ) ) if __name__ == "__main__": print(F"""{solution() = }""")
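An equivalent, arguably clearer form of the reduce-over-strings trick above, using math.prod; the default width of 13 matches range(len(n) - 12) in the original:

from math import prod

def largest_window_product(digits: str, width: int = 13) -> int:
    return max(
        prod(int(d) for d in digits[i : i + width])
        for i in range(len(digits) - width + 1)
    )

assert largest_window_product("123456", 3) == 4 * 5 * 6  # the last window wins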
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
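What the _LazyModule registration above buys: importing the package stays cheap, and the torch-backed symbols are only resolved on first attribute access (raising a helpful error if the backend is missing). A sketch, assuming the usual top-level re-export:

import transformers  # fast: nothing Nezha-specific is imported yet

config = transformers.NezhaConfig()  # first access triggers the real import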
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCAmelCase_ = logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase ) -> List[str]: lowerCAmelCase__ : int = R'''\w+[.]\d+''' lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase ) for pat in pats: lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) ) return key def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowerCAmelCase__ : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any: # Step 1: Convert pytorch tensor to numpy lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) ) lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase ) lowerCAmelCase__ : List[Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase__ : str = rename_key(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # also add unexpected weight so that warning is thrown lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase ) return unflatten_dict(UpperCamelCase )
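The regex step above rewrites PyTorch's "block.N" key segments into Flax's "block_N" convention; the same step in isolation, runnable on its own:

import re

def rename_key(key: str) -> str:
    # "layers.0.weight" -> "layers_0.weight", leaving other dots alone.
    for pat in re.findall(r"\w+[.]\d+", key):
        key = key.replace(pat, "_".join(pat.split(".")))
    return key

assert (
    rename_key("down_blocks.0.attentions.1.proj.weight")
    == "down_blocks_0.attentions_1.proj.weight"
)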
from importlib import import_module from .logging import get_logger lowerCAmelCase_ = get_logger(__name__) class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=None ): lowerCAmelCase__ : Dict = attrs or [] if module is not None: for key in module.__dict__: if key in attrs or not key.startswith('''__''' ): setattr(self , __UpperCAmelCase , getattr(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : Optional[int] = module._original_module if isinstance(__UpperCAmelCase , _PatchedModuleObj ) else module class _lowerCAmelCase : A__ = [] def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ): lowerCAmelCase__ : Dict = obj lowerCAmelCase__ : Optional[Any] = target lowerCAmelCase__ : Tuple = new lowerCAmelCase__ : Tuple = target.split('''.''' )[0] lowerCAmelCase__ : Optional[int] = {} lowerCAmelCase__ : List[Any] = attrs or [] def __enter__( self ): *lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.target.split('''.''' ) # Patch modules: # it's used to patch attributes of submodules like "os.path.join"; # in this case we need to patch "os" and "os.path" for i in range(len(__UpperCAmelCase ) ): try: lowerCAmelCase__ : List[str] = import_module('''.'''.join(submodules[: i + 1] ) ) except ModuleNotFoundError: continue # We iterate over all the globals in self.obj in case we find "os" or "os.path" for attr in self.obj.__dir__(): lowerCAmelCase__ : Union[str, Any] = getattr(self.obj , __UpperCAmelCase ) # We don't check for the name of the global, but rather if its value *is* "os" or "os.path". # This allows to patch renamed modules like "from os import path as ospath". if obj_attr is submodule or ( (isinstance(__UpperCAmelCase , _PatchedModuleObj ) and obj_attr._original_module is submodule) ): lowerCAmelCase__ : Any = obj_attr # patch at top level setattr(self.obj , __UpperCAmelCase , _PatchedModuleObj(__UpperCAmelCase , attrs=self.attrs ) ) lowerCAmelCase__ : Optional[int] = getattr(self.obj , __UpperCAmelCase ) # construct lower levels patches for key in submodules[i + 1 :]: setattr(__UpperCAmelCase , __UpperCAmelCase , _PatchedModuleObj(getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , attrs=self.attrs ) ) lowerCAmelCase__ : Union[str, Any] = getattr(__UpperCAmelCase , __UpperCAmelCase ) # finally set the target attribute setattr(__UpperCAmelCase , __UpperCAmelCase , self.new ) # Patch attribute itself: # it's used for builtins like "open", # and also to patch "os.path.join" we may also need to patch "join" # itself if it was imported as "from os.path import join". if submodules: # if it's an attribute of a submodule like "os.path.join" try: lowerCAmelCase__ : Optional[Any] = getattr(import_module('''.'''.join(__UpperCAmelCase ) ) , __UpperCAmelCase ) except (AttributeError, ModuleNotFoundError): return # We iterate over all the globals in self.obj in case we find "os.path.join" for attr in self.obj.__dir__(): # We don't check for the name of the global, but rather if its value *is* "os.path.join". # This allows to patch renamed attributes like "from os.path import join as pjoin". 
if getattr(self.obj , __UpperCAmelCase ) is attr_value: lowerCAmelCase__ : Optional[Any] = getattr(self.obj , __UpperCAmelCase ) setattr(self.obj , __UpperCAmelCase , self.new ) elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open" lowerCAmelCase__ : str = globals()['''__builtins__'''][target_attr] setattr(self.obj , __UpperCAmelCase , self.new ) else: raise RuntimeError(f"""Tried to patch attribute {target_attr} instead of a submodule.""" ) def __exit__( self , *__UpperCAmelCase ): for attr in list(self.original ): setattr(self.obj , __UpperCAmelCase , self.original.pop(__UpperCAmelCase ) ) def __magic_name__( self ): self.__enter__() self._active_patches.append(self ) def __magic_name__( self ): try: self._active_patches.remove(self ) except ValueError: # If the patch hasn't been started this will fail return None return self.__exit__()
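In the datasets library this patcher is known as patch_submodule; both that name and the import path below are assumptions, and some_module is hypothetical:

from datasets.utils.patching import patch_submodule  # assumed import path

import some_module  # hypothetical module that calls os.path.join internally

with patch_submodule(some_module, "os.path.join", lambda *parts: "/patched"):
    some_module.do_work()  # hypothetical call; sees the patched join inside the block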
import inspect import unittest import numpy as np from transformers import BeitConfig from transformers.testing_utils import require_flax, require_vision, slow from transformers.utils import cached_property, is_flax_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor if is_flax_available(): import jax from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel if is_vision_available(): from PIL import Image from transformers import BeitImageProcessor class _lowerCAmelCase ( unittest.TestCase ): def __init__( self , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=13 , __UpperCAmelCase=30 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=10 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , ): lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Tuple = vocab_size lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : List[str] = image_size lowerCAmelCase__ : Tuple = patch_size lowerCAmelCase__ : List[str] = num_channels lowerCAmelCase__ : str = is_training lowerCAmelCase__ : List[Any] = use_labels lowerCAmelCase__ : List[Any] = hidden_size lowerCAmelCase__ : Optional[Any] = num_hidden_layers lowerCAmelCase__ : List[Any] = num_attention_heads lowerCAmelCase__ : List[str] = intermediate_size lowerCAmelCase__ : Any = hidden_act lowerCAmelCase__ : int = hidden_dropout_prob lowerCAmelCase__ : Any = attention_probs_dropout_prob lowerCAmelCase__ : Dict = type_sequence_label_size lowerCAmelCase__ : Optional[int] = initializer_range # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) lowerCAmelCase__ : int = (image_size // patch_size) ** 2 lowerCAmelCase__ : str = num_patches + 1 def __magic_name__( self ): lowerCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : int = None if self.use_labels: lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : str = BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) return config, pixel_values, labels def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = FlaxBeitModel(config=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : int = FlaxBeitForMaskedImageModeling(config=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, 
self.vocab_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = self.type_sequence_label_size lowerCAmelCase__ : Tuple = FlaxBeitForImageClassification(config=__UpperCAmelCase ) lowerCAmelCase__ : Any = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowerCAmelCase__ : List[str] = 1 lowerCAmelCase__ : Union[str, Any] = FlaxBeitForImageClassification(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowerCAmelCase__ : List[str] = model(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : str = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Any = config_and_inputs lowerCAmelCase__ : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_flax class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = ( (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else () ) def __magic_name__( self ): lowerCAmelCase__ : int = FlaxBeitModelTester(self ) lowerCAmelCase__ : int = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 ) def __magic_name__( self ): self.config_tester.run_common_tests() def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : int = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = inspect.signature(model.__call__ ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : List[Any] = [*signature.parameters.keys()] lowerCAmelCase__ : Union[str, Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowerCAmelCase__ : Tuple = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model_class(__UpperCAmelCase ) @jax.jit def model_jitted(__UpperCAmelCase , **__UpperCAmelCase ): return model(pixel_values=__UpperCAmelCase , **__UpperCAmelCase ) with self.subTest('''JIT Enabled''' ): lowerCAmelCase__ : Tuple = model_jitted(**__UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): lowerCAmelCase__ : Tuple = model_jitted(**__UpperCAmelCase ).to_tuple() self.assertEqual(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) ) for jitted_output, output in zip(__UpperCAmelCase , __UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def __magic_name__( self ): lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def __magic_name__( self ): for model_class_name in 
self.all_model_classes: lowerCAmelCase__ : List[Any] = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' ) lowerCAmelCase__ : Union[str, Any] = model(np.ones((1, 3, 224, 224) ) ) self.assertIsNotNone(__UpperCAmelCase ) def __lowerCAmelCase ( ) -> Tuple: lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_vision @require_flax class _lowerCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__( self ): return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None @slow def __magic_name__( self ): lowerCAmelCase__ : Any = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ) lowerCAmelCase__ : int = self.default_image_processor lowerCAmelCase__ : str = prepare_img() lowerCAmelCase__ : List[Any] = image_processor(images=__UpperCAmelCase , return_tensors='''np''' ).pixel_values # prepare bool_masked_pos lowerCAmelCase__ : str = np.ones((1, 196) , dtype=__UpperCAmelCase ) # forward pass lowerCAmelCase__ : List[str] = model(pixel_values=__UpperCAmelCase , bool_masked_pos=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = outputs.logits # verify the logits lowerCAmelCase__ : Tuple = (1, 196, 8192) self.assertEqual(logits.shape , __UpperCAmelCase ) lowerCAmelCase__ : int = np.array( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ) self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __UpperCAmelCase , atol=1e-2 ) ) @slow def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ) lowerCAmelCase__ : List[str] = self.default_image_processor lowerCAmelCase__ : Dict = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors='''np''' ) # forward pass lowerCAmelCase__ : Union[str, Any] = model(**__UpperCAmelCase ) lowerCAmelCase__ : List[str] = outputs.logits # verify the logits lowerCAmelCase__ : Union[str, Any] = (1, 1000) self.assertEqual(logits.shape , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = np.array([-1.2385, -1.0987, -1.0108] ) self.assertTrue(np.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) lowerCAmelCase__ : Union[str, Any] = 281 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase ) @slow def __magic_name__( self ): lowerCAmelCase__ : List[str] = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ) lowerCAmelCase__ : List[Any] = self.default_image_processor lowerCAmelCase__ : int = prepare_img() lowerCAmelCase__ : Optional[int] = image_processor(images=__UpperCAmelCase , return_tensors='''np''' ) # forward pass lowerCAmelCase__ : str = model(**__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = outputs.logits # verify the logits lowerCAmelCase__ : Optional[int] = (1, 2_1841) self.assertEqual(logits.shape , __UpperCAmelCase ) lowerCAmelCase__ : Any = np.array([1.6881, -0.2787, 0.5901] ) self.assertTrue(np.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) ) lowerCAmelCase__ : int = 2396 self.assertEqual(logits.argmax(-1 ).item() , __UpperCAmelCase )
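The JIT test above checks that compiled and eager execution agree; the same pattern in isolation:

import jax
import jax.numpy as jnp

@jax.jit
def f(x):
    return x * 2.0 + 1.0

x = jnp.ones((2, 2))
with jax.disable_jit():
    eager = f(x)  # runs op by op
jitted = f(x)     # compiled on first call
assert jnp.allclose(eager, jitted)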
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCAmelCase_ = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): @classmethod def __magic_name__( cls ): lowerCAmelCase__ : Dict = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def __magic_name__( cls ): try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __magic_name__( self ): lowerCAmelCase__ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) 
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : Any = True lowerCAmelCase__ : Any = flatten_dict(modela.params ) lowerCAmelCase__ : List[str] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: lowerCAmelCase__ : Optional[Any] = False return models_are_equal @require_flax class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Dict = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = '''bert''' lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = '''bert''' lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
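# A minimal sketch of the parameter-equality check used above: flatten two
# nested Flax parameter trees and compare them leaf by leaf. `params_a` and
# `params_b` are toy stand-ins for `model.params`.
import numpy as np
from flax.traverse_util import flatten_dict

params_a = {"dense": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}
params_b = {"dense": {"kernel": np.ones((2, 2)), "bias": np.zeros(2)}}

flat_a = flatten_dict(params_a)
flat_b = flatten_dict(params_b)

models_are_equal = all(
    np.sum(np.abs(flat_a[key] - flat_b[key])) <= 1e-4 for key in flat_a
)
assert models_are_equal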
import math import time from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class _lowerCAmelCase ( _lowercase ): def __init__( self , *__UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase ): super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ : Any = eval_examples lowerCAmelCase__ : Dict = post_process_function def __magic_name__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase = "eval" ): lowerCAmelCase__ : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset lowerCAmelCase__ : str = self.get_eval_dataloader(__UpperCAmelCase ) lowerCAmelCase__ : int = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. lowerCAmelCase__ : Union[str, Any] = self.compute_metrics lowerCAmelCase__ : Tuple = None lowerCAmelCase__ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCAmelCase__ : Any = time.time() try: lowerCAmelCase__ : Optional[int] = eval_loop( __UpperCAmelCase , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , metric_key_prefix=__UpperCAmelCase , ) finally: lowerCAmelCase__ : Optional[Any] = compute_metrics lowerCAmelCase__ : int = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __UpperCAmelCase , __UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default lowerCAmelCase__ : int = self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , output.predictions ) lowerCAmelCase__ : List[str] = self.compute_metrics(__UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): lowerCAmelCase__ : Dict = metrics.pop(__UpperCAmelCase ) metrics.update(output.metrics ) else: lowerCAmelCase__ : List[Any] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(__UpperCAmelCase ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report() ) lowerCAmelCase__ : Optional[int] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __UpperCAmelCase ) return metrics def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase = "test" ): lowerCAmelCase__ : Dict = self.get_test_dataloader(__UpperCAmelCase ) # Temporarily disable metric computation, we will do it in the loop here. 
lowerCAmelCase__ : Union[str, Any] = self.compute_metrics lowerCAmelCase__ : List[Any] = None lowerCAmelCase__ : Any = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop lowerCAmelCase__ : Optional[Any] = time.time() try: lowerCAmelCase__ : int = eval_loop( __UpperCAmelCase , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__UpperCAmelCase , metric_key_prefix=__UpperCAmelCase , ) finally: lowerCAmelCase__ : Tuple = compute_metrics lowerCAmelCase__ : Optional[int] = self.args.eval_batch_size * self.args.world_size if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics: start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""] output.metrics.update( speed_metrics( __UpperCAmelCase , __UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) ) if self.post_process_function is None or self.compute_metrics is None: return output lowerCAmelCase__ : Optional[int] = self.post_process_function(__UpperCAmelCase , __UpperCAmelCase , output.predictions , '''predict''' ) lowerCAmelCase__ : str = self.compute_metrics(__UpperCAmelCase ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(f"""{metric_key_prefix}_""" ): lowerCAmelCase__ : List[Any] = metrics.pop(__UpperCAmelCase ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__UpperCAmelCase )
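# A minimal sketch of the "temporarily disable, always restore" pattern that
# both evaluate() and predict() above apply to `compute_metrics`: the
# attribute is nulled out before the prediction loop and restored in a
# `finally` block, so an exception inside the loop cannot leave the trainer
# in a broken state. `TinyTrainer` is a toy stand-in, not the HF Trainer.
class TinyTrainer:
    def __init__(self, compute_metrics):
        self.compute_metrics = compute_metrics

    def evaluate(self, predictions):
        compute_metrics = self.compute_metrics
        self.compute_metrics = None  # the loop itself must not compute metrics
        try:
            output = {"predictions": predictions}  # stand-in for the eval loop
        finally:
            self.compute_metrics = compute_metrics  # always restored
        return self.compute_metrics(output) if self.compute_metrics else output


trainer = TinyTrainer(lambda out: {"n": len(out["predictions"])})
assert trainer.evaluate([1, 2, 3]) == {"n": 3}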
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    """Quicksort a[start:end + 1] in place; return the number of comparisons."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        a[end], a[pivot] = a[pivot], a[end]
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partition a[start:end + 1] around a random pivot; return (pivot index, comparisons)."""
    count = 0
    pivot = randint(start, end)
    a[end], a[pivot] = a[pivot], a[end]
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            a[new_pivot_index], a[index] = a[index], a[new_pivot_index]
    a[new_pivot_index + 1], a[end] = a[end], a[new_pivot_index + 1]
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
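# A small usage sketch of the functions above: sort a fixed list in place and
# report the comparison count, which varies run to run because the pivot is
# chosen at random (expected O(n log n) comparisons).
data = [5, 3, 8, 1, 9, 2]
comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
assert data == [1, 2, 3, 5, 8, 9]
print(f"sorted with {comparisons} comparisons")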
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer lowerCAmelCase_ = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast lowerCAmelCase_ = TaTokenizerFast lowerCAmelCase_ = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """MT5EncoderModel""", """MT5ForConditionalGeneration""", """MT5ForQuestionAnswering""", """MT5Model""", """MT5PreTrainedModel""", """MT5Stack""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys lowerCAmelCase_ = _LazyModule( __name__, globals()["""__file__"""], _import_structure, extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast}, module_spec=__spec__, )
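# A minimal sketch of the lazy-import idea behind `_LazyModule` (a toy
# illustration, not the transformers implementation): attribute access
# triggers the real import, which is then cached on the module object.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the next access is a plain lookup
        return value


# Example: `sqrt` is imported from `math` only on first access.
lazy = LazyModule("lazy_math", {"math": ["sqrt"]})
assert lazy.sqrt(9) == 3.0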
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: assert isinstance(UpperCamelCase , UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : str = features.copy() if features else default_expected_features lowerCAmelCase__ : List[Any] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: if issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = parquet_path elif issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = [parquet_path] lowerCAmelCase__ : int = tmp_path / '''cache''' lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , 
cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str: assert isinstance(UpperCamelCase , UpperCamelCase ) for split in splits: lowerCAmelCase__ : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features lowerCAmelCase__ : Optional[int] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: if split: lowerCAmelCase__ : Tuple = {split: parquet_path} else: lowerCAmelCase__ : int = '''train''' lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path} lowerCAmelCase__ : Optional[int] = tmp_path / '''cache''' lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) lowerCAmelCase__ : int = pf.read() assert dataset.data.table == output_table def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) 
lowerCAmelCase__ : Dict = {'''image''': [image_path]} lowerCAmelCase__ : int = Features({'''image''': Image()} ) lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase ) lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any: assert get_writer_batch_size(UpperCamelCase ) == expected
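# A minimal sketch of the parquet round trip these tests exercise, written
# against pyarrow directly in the same pytest style (uses the built-in
# `tmp_path` fixture).
import pyarrow as pa
import pyarrow.parquet as pq


def test_parquet_roundtrip(tmp_path):
    table = pa.table({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]})
    path = str(tmp_path / "foo.parquet")
    pq.write_table(table, path)
    assert pq.read_table(path) == table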
import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def __lowerCAmelCase ( UpperCamelCase ) -> List[Any]: def wrapper(*UpperCamelCase , **UpperCamelCase ): lowerCAmelCase__ : List[str] = timeit.default_timer() lowerCAmelCase__ : List[str] = func(*UpperCamelCase , **UpperCamelCase ) lowerCAmelCase__ : str = timeit.default_timer() - starttime return delta lowerCAmelCase__ : List[Any] = func.__name__ return wrapper def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase=100 , UpperCamelCase=None ) -> List[str]: lowerCAmelCase__ : Optional[Any] = [] lowerCAmelCase__ : List[str] = seq_shapes or {} for i in range(UpperCamelCase ): lowerCAmelCase__ : Tuple = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(UpperCamelCase , _ArrayXD ): lowerCAmelCase__ : int = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(UpperCamelCase , datasets.Value ): if v.dtype == "string": lowerCAmelCase__ : List[str] = '''The small grey turtle was surprisingly fast when challenged.''' else: lowerCAmelCase__ : List[Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(UpperCamelCase , datasets.Sequence ): while isinstance(UpperCamelCase , datasets.Sequence ): lowerCAmelCase__ : str = v.feature lowerCAmelCase__ : Any = seq_shapes[k] lowerCAmelCase__ : Optional[int] = np.random.rand(*UpperCamelCase ).astype(v.dtype ) lowerCAmelCase__ : Dict = data dummy_data.append((i, example) ) return dummy_data def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=100 , UpperCamelCase=None ) -> str: lowerCAmelCase__ : str = generate_examples(UpperCamelCase , num_examples=UpperCamelCase , seq_shapes=UpperCamelCase ) with ArrowWriter(features=UpperCamelCase , path=UpperCamelCase ) as writer: for key, record in dummy_data: lowerCAmelCase__ : Optional[int] = features.encode_example(UpperCamelCase ) writer.write(UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = writer.finalize() if not num_final_examples == num_examples: raise ValueError( F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" ) lowerCAmelCase__ : int = datasets.Dataset.from_file(filename=UpperCamelCase , info=datasets.DatasetInfo(features=UpperCamelCase ) ) return dataset
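# A minimal timing decorator in the spirit of the one above; `functools.wraps`
# carries the wrapped function's name and docstring over to the wrapper.
import functools
import timeit


def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start

    return wrapper


@get_duration
def busy_loop():
    sum(range(10_000))


print(f"{busy_loop.__name__} took {busy_loop():.6f}s")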
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""", } class _lowerCAmelCase ( _lowercase , _lowercase ): A__ = 'focalnet' def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1e-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Any = image_size lowerCAmelCase__ : Any = patch_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Dict = embed_dim lowerCAmelCase__ : Optional[int] = use_conv_embed lowerCAmelCase__ : Optional[int] = hidden_sizes lowerCAmelCase__ : Optional[Any] = depths lowerCAmelCase__ : Dict = focal_levels lowerCAmelCase__ : int = focal_windows lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : Optional[int] = mlp_ratio lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob lowerCAmelCase__ : List[Any] = drop_path_rate lowerCAmelCase__ : Tuple = use_layerscale lowerCAmelCase__ : List[Any] = layerscale_value lowerCAmelCase__ : Dict = use_post_layernorm lowerCAmelCase__ : Dict = use_post_layernorm_in_modulation lowerCAmelCase__ : Dict = normalize_modulator lowerCAmelCase__ : Union[str, Any] = initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : Tuple = encoder_stride lowerCAmelCase__ : Dict = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
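# A small usage sketch of the config class above (published in transformers
# as `FocalNetConfig`; assumes a release recent enough to ship FocalNet):
# configs are plain containers that can be created with keyword overrides
# and round-tripped through dicts or JSON.
from transformers import FocalNetConfig

config = FocalNetConfig(image_size=192, embed_dim=128)
assert config.image_size == 192
restored = FocalNetConfig.from_dict(config.to_dict())  # dict round trip
assert restored.embed_dim == 128
assert '"image_size": 192' in config.to_json_string()  # JSON serialization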
import copy import inspect import unittest from transformers import PretrainedConfig, SwiftFormerConfig from transformers.testing_utils import ( require_torch, require_vision, slow, torch_device, ) from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwiftFormerForImageClassification, SwiftFormerModel from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=3 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=224 , __UpperCAmelCase=1000 , __UpperCAmelCase=[3, 3, 6, 4] , __UpperCAmelCase=[48, 56, 112, 220] , ): lowerCAmelCase__ : str = parent lowerCAmelCase__ : Optional[int] = batch_size lowerCAmelCase__ : List[str] = num_channels lowerCAmelCase__ : Optional[Any] = is_training lowerCAmelCase__ : List[Any] = use_labels lowerCAmelCase__ : int = hidden_dropout_prob lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob lowerCAmelCase__ : List[str] = num_labels lowerCAmelCase__ : Any = image_size lowerCAmelCase__ : str = layer_depths lowerCAmelCase__ : List[str] = embed_dims def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : List[Any] = None if self.use_labels: lowerCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) lowerCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels def __magic_name__( self ): return SwiftFormerConfig( depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=__UpperCAmelCase , layer_scale_init_value=1e-5 , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : int = SwiftFormerModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : List[Any] = self.num_labels lowerCAmelCase__ : List[Any] = SwiftFormerForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) lowerCAmelCase__ : str = SwiftFormerForImageClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowerCAmelCase__ : Dict = model(__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self ): ((lowerCAmelCase__) , 
(lowerCAmelCase__) , (lowerCAmelCase__)) : Tuple = self.prepare_config_and_inputs() lowerCAmelCase__ : List[str] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ): A__ = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else () A__ = ( {'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification} if is_torch_available() else {} ) A__ = False A__ = False A__ = False A__ = False A__ = False def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = SwiftFormerModelTester(self ) lowerCAmelCase__ : Union[str, Any] = ConfigTester( self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , ) def __magic_name__( self ): self.config_tester.run_common_tests() @unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' ) def __magic_name__( self ): pass def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Dict = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) ) def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Optional[int] = model_class(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowerCAmelCase__ : Union[str, Any] = [*signature.parameters.keys()] lowerCAmelCase__ : Dict = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase ) @slow def __magic_name__( self ): for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCAmelCase__ : List[str] = SwiftFormerModel.from_pretrained(__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) @unittest.skip(reason='''SwiftFormer does not output attentions''' ) def __magic_name__( self ): pass def __magic_name__( self ): def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = model_class(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() with torch.no_grad(): lowerCAmelCase__ : Tuple = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = outputs.hidden_states lowerCAmelCase__ : Tuple = 8 self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase ) # TODO # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width) # with the width and height being successively divided by 2, after every 2 blocks for i in range(len(__UpperCAmelCase ) ): self.assertEqual( hidden_states[i].shape , torch.Size( [ self.model_tester.batch_size, self.model_tester.embed_dims[i // 2], (self.model_tester.image_size // 4) // 2 ** (i // 2), (self.model_tester.image_size // 4) // 2 ** (i // 2), ] 
) , ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowerCAmelCase__ : Union[str, Any] = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowerCAmelCase__ : Optional[int] = True check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): def _config_zero_init(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = copy.deepcopy(__UpperCAmelCase ) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key: setattr(__UpperCAmelCase , __UpperCAmelCase , 1e-10 ) if isinstance(getattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = _config_zero_init(getattr(__UpperCAmelCase , __UpperCAmelCase ) ) setattr(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return configs_no_init lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() lowerCAmelCase__ : Tuple = _config_zero_init(__UpperCAmelCase ) for model_class in self.all_model_classes: lowerCAmelCase__ : List[str] = model_class(config=__UpperCAmelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __magic_name__( self ): pass def __lowerCAmelCase ( ) -> Union[str, Any]: lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowerCAmelCase ( unittest.TestCase ): @cached_property def __magic_name__( self ): return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None @slow def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(__UpperCAmelCase ) lowerCAmelCase__ : int = self.default_image_processor lowerCAmelCase__ : Dict = prepare_img() lowerCAmelCase__ : Union[str, Any] = image_processor(images=__UpperCAmelCase , return_tensors='''pt''' ).to(__UpperCAmelCase ) # forward pass with torch.no_grad(): lowerCAmelCase__ : int = model(**__UpperCAmelCase ) # verify the logits lowerCAmelCase__ : List[Any] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(__UpperCAmelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
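# A dict-based sketch of the `_config_zero_init` helper above: deep-copy the
# config and push every initializer-scale entry toward zero, leaving all
# other settings untouched.
import copy


def config_zero_init(config: dict) -> dict:
    cfg = copy.deepcopy(config)
    for key in cfg:
        if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
            cfg[key] = 1e-10
    return cfg


cfg = config_zero_init({"initializer_range": 0.02, "num_layers": 4})
assert cfg == {"initializer_range": 1e-10, "num_layers": 4}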
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""


_KWARGS_DESCRIPTION = """
Args:
    predictions (`list` of `int`): Predicted class labels, as returned by a model.
    references (`list` of `int`): Ground truth labels.
    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.

Returns:
    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.

Examples:

    Example 1-A simple example using only predictions and references.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
        >>> print(round(results['pearsonr'], 2))
        -0.74

    Example 2-The same as Example 1, but that also returns the `p-value`.
        >>> pearsonr_metric = datasets.load_metric("pearsonr")
        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
        >>> print(sorted(list(results.keys())))
        ['p-value', 'pearsonr']
        >>> print(round(results['pearsonr'], 2))
        -0.74
        >>> print(round(results['p-value'], 2))
        0.15
"""


_CITATION = """
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, Ilhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Antonio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
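# A direct scipy usage sketch of what the metric wraps: `pearsonr` returns
# the correlation coefficient together with the two-sided p-value.
from scipy.stats import pearsonr

r, p = pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
print(round(r, 2), round(p, 2))  # -0.74 0.15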
import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING A__ = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Dict = AudioClassificationPipeline(model=__UpperCAmelCase , feature_extractor=__UpperCAmelCase ) # test with a raw waveform lowerCAmelCase__ : Optional[int] = np.zeros((3_4000,) ) lowerCAmelCase__ : Tuple = np.zeros((1_4000,) ) return audio_classifier, [audioa, audio] def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ , lowerCAmelCase__ : Any = examples lowerCAmelCase__ : List[str] = audio_classifier(__UpperCAmelCase ) # by default a model is initialized with num_labels=2 self.assertEqual( __UpperCAmelCase , [ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : Any = audio_classifier(__UpperCAmelCase , top_k=1 ) self.assertEqual( __UpperCAmelCase , [ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] , ) self.run_torchaudio(__UpperCAmelCase ) @require_torchaudio def __magic_name__( self , __UpperCAmelCase ): import datasets # test with a local file lowerCAmelCase__ : Tuple = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' ) lowerCAmelCase__ : Dict = dataset[0]['''audio''']['''array'''] lowerCAmelCase__ : List[Any] = audio_classifier(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, {'''score''': ANY(__UpperCAmelCase ), '''label''': ANY(__UpperCAmelCase )}, ] , ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Any = '''anton-l/wav2vec2-random-tiny-classifier''' lowerCAmelCase__ : Tuple = pipeline('''audio-classification''' , model=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = np.ones((8000,) ) lowerCAmelCase__ : Union[str, Any] = audio_classifier(__UpperCAmelCase , top_k=4 ) lowerCAmelCase__ : str = [ {'''score''': 0.0842, '''label''': '''no'''}, {'''score''': 0.0838, '''label''': '''up'''}, {'''score''': 0.0837, '''label''': '''go'''}, {'''score''': 0.0834, '''label''': '''right'''}, ] lowerCAmelCase__ : Optional[int] = [ {'''score''': 0.0845, '''label''': '''stop'''}, {'''score''': 0.0844, '''label''': '''on'''}, {'''score''': 0.0841, '''label''': '''right'''}, {'''score''': 0.0834, '''label''': '''left'''}, ] self.assertIn(nested_simplify(__UpperCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) lowerCAmelCase__ : List[Any] = {'''array''': np.ones((8000,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate} lowerCAmelCase__ : Optional[Any] = audio_classifier(__UpperCAmelCase , top_k=4 ) self.assertIn(nested_simplify(__UpperCAmelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] ) @require_torch @slow def __magic_name__( self ): import datasets lowerCAmelCase__ : Union[str, Any] = '''superb/wav2vec2-base-superb-ks''' lowerCAmelCase__ : Optional[int] = 
pipeline('''audio-classification''' , model=__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' ) lowerCAmelCase__ : Dict = np.array(dataset[3]['''speech'''] , dtype=np.floataa ) lowerCAmelCase__ : List[str] = audio_classifier(__UpperCAmelCase , top_k=4 ) self.assertEqual( nested_simplify(__UpperCAmelCase , decimals=3 ) , [ {'''score''': 0.981, '''label''': '''go'''}, {'''score''': 0.007, '''label''': '''up'''}, {'''score''': 0.006, '''label''': '''_unknown_'''}, {'''score''': 0.001, '''label''': '''down'''}, ] , ) @require_tf @unittest.skip('''Audio classification is not implemented for TF''' ) def __magic_name__( self ): pass
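# A minimal usage sketch of the pipeline under test (downloads the tiny
# checkpoint from the Hub on first run): raw 1-D float waveforms are accepted
# directly, optionally wrapped in a dict with an explicit sampling rate.
import numpy as np
from transformers import pipeline

classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")
waveform = np.zeros(8_000, dtype=np.float32)  # 0.5 s of silence at 16 kHz
print(classifier(waveform, top_k=2))
print(classifier({"array": waveform, "sampling_rate": 16_000}, top_k=2))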
from manim import * class _lowerCAmelCase ( _lowercase ): def __magic_name__( self ): lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 ) lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )] lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 ) lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Optional[Any] = [] for i, rect in enumerate(__UpperCAmelCase ): rect.set_stroke(__UpperCAmelCase ) lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 ) self.add(__UpperCAmelCase ) model_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 ) lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 ) target.move_to(__UpperCAmelCase ) ckpt_arr.append(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase__ : List[Any] = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : str = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 ) lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) ) lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Dict = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) ) self.play(*__UpperCAmelCase ) self.play(FadeOut(__UpperCAmelCase ) ) lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) ) self.play( FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , ) self.wait()
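# A minimal manim sketch of the pattern the scene above builds on: create
# mobjects, position them relative to each other, then animate via
# `self.play`. Render with e.g. `manim -pql this_file.py MiniScene`.
from manim import DOWN, Create, Scene, Square, Text, Write


class MiniScene(Scene):
    def construct(self):
        box = Square(side_length=1.0)
        label = Text("CPU", font_size=24).next_to(box, DOWN)
        self.play(Create(box), Write(label))
        self.wait()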
import unittest from transformers import PegasusTokenizer, PegasusTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""") @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/pegasus-large''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = '''</s>''' lowerCAmelCase__ : Optional[int] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<pad>''' ) self.assertEqual(vocab_keys[1] , '''</s>''' ) self.assertEqual(vocab_keys[-1] , '''v''' ) self.assertEqual(len(__UpperCAmelCase ) , 1103 ) def __magic_name__( self ): self.assertEqual(self.get_tokenizer().vocab_size , 1103 ) def __magic_name__( self ): lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = ( '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important''' ''' </s> <pad> <pad> <pad>''' ) lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self._large_tokenizer # <mask_1> masks whole sentence while <mask_2> masks single word lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.''' lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1] lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self._large_tokenizer # The tracebacks for the following asserts are **better** without messages or self.assertEqual assert tokenizer.vocab_size == 9_6103 assert tokenizer.pad_token_id == 0 assert tokenizer.eos_token_id == 1 assert tokenizer.offset == 103 assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105 assert tokenizer.unk_token == "<unk>" assert tokenizer.model_max_length == 1024 lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.''' lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 
1679, 1_0710, 107, 1] lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"] @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example'''] lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 1024) assert batch.attention_mask.shape == (2, 1024) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. @slow def __magic_name__( self ): # fmt: off lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , ) @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( _lowercase , unittest.TestCase ): A__ = PegasusTokenizer A__ = PegasusTokenizerFast A__ = True A__ = True def __magic_name__( self ): super().setUp() # We have a SentencePiece fixture for testing 
lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' ) tokenizer.save_pretrained(self.tmpdirname ) @cached_property def __magic_name__( self ): return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' ) def __magic_name__( self , **__UpperCAmelCase ): return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase ): return ("This is a test", "This is a test") def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname ) lowerCAmelCase__ : str = ( '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>''' ''' <pad> <pad> <pad>''' ) lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0] self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) @require_torch def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example'''] lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny'''] lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' ) assert batch.input_ids.shape == (2, 4096) assert batch.attention_mask.shape == (2, 4096) assert targets["input_ids"].shape == (2, 5) assert len(__UpperCAmelCase ) == 2 # input_ids, attention_mask. def __magic_name__( self ): lowerCAmelCase__ : List[str] = ( '''This is an example string that is used to test the original TF implementation against the HF''' ''' implementation''' ) lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids self.assertListEqual( __UpperCAmelCase , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
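# Illustrative usage sketch (not part of the test file above; downloads the
# checkpoint from the Hugging Face Hub). It shows the behaviour the assertions
# encode: the large Pegasus vocabulary reserves its low ids for special tokens
# (pad=0, eos=1, the mask tokens, offset=103), so every encoded sequence ends
# with the eos id 1 and round-trips through decode().
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
ids = tok("To ensure a smooth flow of bank resolutions.").input_ids
assert ids[-1] == tok.eos_token_id == 1
print(tok.decode(ids, skip_special_tokens=True))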
import collections import os import re from pathlib import Path lowerCAmelCase_ = """src/transformers""" # Matches is_xxx_available() lowerCAmelCase_ = re.compile(R"""is\_([a-z_]*)_available()""") # Catches a one-line _import_struct = {xxx} lowerCAmelCase_ = re.compile(R"""^_import_structure\s+=\s+\{([^\}]+)\}""") # Catches a line with a key-values pattern: "bla": ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""\s+\"\S*\":\s+\[([^\]]*)\]""") # Catches a line if not is_foo_available lowerCAmelCase_ = re.compile(R"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""") # Catches a line _import_struct["bla"].append("foo") lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""") # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] lowerCAmelCase_ = re.compile(R"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""") # Catches a line with an object between quotes and a comma: "MyModel", lowerCAmelCase_ = re.compile(R"""^\s+\"([^\"]+)\",""") # Catches a line with objects between brackets only: ["foo", "bar"], lowerCAmelCase_ = re.compile(R"""^\s+\[([^\]]+)\]""") # Catches a line with from foo import bar, bla, boo lowerCAmelCase_ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""") # Catches a line with try: lowerCAmelCase_ = re.compile(R"""^\s*try:""") # Catches a line with else: lowerCAmelCase_ = re.compile(R"""^\s*else:""") def __lowerCAmelCase ( UpperCamelCase ) -> int: if _re_test_backend.search(UpperCamelCase ) is None: return None lowerCAmelCase__ : int = [b[0] for b in _re_backend.findall(UpperCamelCase )] backends.sort() return "_and_".join(UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Any: with open(UpperCamelCase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: lowerCAmelCase__ : Union[str, Any] = f.readlines() lowerCAmelCase__ : Tuple = 0 while line_index < len(UpperCamelCase ) and not lines[line_index].startswith('''_import_structure = {''' ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(UpperCamelCase ): return None # First grab the objects without a specific backend in _import_structure lowerCAmelCase__ : List[str] = [] while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None: lowerCAmelCase__ : str = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(UpperCamelCase ): lowerCAmelCase__ : str = _re_one_line_import_struct.search(UpperCamelCase ).groups()[0] lowerCAmelCase__ : Optional[Any] = re.findall(R'''\[([^\]]+)\]''' , UpperCamelCase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] ) line_index += 1 continue lowerCAmelCase__ : Tuple = _re_import_struct_key_value.search(UpperCamelCase ) if single_line_import_search is not None: lowerCAmelCase__ : Optional[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) line_index += 1 lowerCAmelCase__ : Any = {'''none''': objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith('''if TYPE_CHECKING''' ): # If the line is an if not is_backend_available, we grab all objects associated. 
lowerCAmelCase__ : List[str] = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ : Union[str, Any] = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ : Optional[int] = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ): lowerCAmelCase__ : str = lines[line_index] if _re_import_struct_add_one.search(UpperCamelCase ) is not None: objects.append(_re_import_struct_add_one.search(UpperCamelCase ).groups()[0] ) elif _re_import_struct_add_many.search(UpperCamelCase ) is not None: lowerCAmelCase__ : Optional[int] = _re_import_struct_add_many.search(UpperCamelCase ).groups()[0].split(''', ''' ) lowerCAmelCase__ : List[Any] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_between_brackets.search(UpperCamelCase ) is not None: lowerCAmelCase__ : List[Any] = _re_between_brackets.search(UpperCamelCase ).groups()[0].split(''', ''' ) lowerCAmelCase__ : Optional[int] = [obj[1:-1] for obj in imports if len(UpperCamelCase ) > 0] objects.extend(UpperCamelCase ) elif _re_quote_object.search(UpperCamelCase ) is not None: objects.append(_re_quote_object.search(UpperCamelCase ).groups()[0] ) elif line.startswith(''' ''' * 8 + '''"''' ): objects.append(line[9:-3] ) elif line.startswith(''' ''' * 12 + '''"''' ): objects.append(line[13:-3] ) line_index += 1 lowerCAmelCase__ : Optional[Any] = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend lowerCAmelCase__ : Any = [] while ( line_index < len(UpperCamelCase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith('''else''' ) ): lowerCAmelCase__ : Tuple = lines[line_index] lowerCAmelCase__ : List[Any] = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 8 ): objects.append(line[8:-2] ) line_index += 1 lowerCAmelCase__ : Dict = {'''none''': objects} # Let's continue with backend-specific objects while line_index < len(UpperCamelCase ): # If the line is an if is_backend_available, we grab all objects associated. 
lowerCAmelCase__ : str = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: lowerCAmelCase__ : Tuple = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 lowerCAmelCase__ : str = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ): lowerCAmelCase__ : Any = lines[line_index] lowerCAmelCase__ : Union[str, Any] = _re_import.search(UpperCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(''', ''' ) ) elif line.startswith(''' ''' * 12 ): objects.append(line[12:-2] ) line_index += 1 lowerCAmelCase__ : str = objects else: line_index += 1 return import_dict_objects, type_hint_objects def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[Any]: def find_duplicates(UpperCamelCase ): return [k for k, v in collections.Counter(UpperCamelCase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] lowerCAmelCase__ : Optional[Any] = [] for key in import_dict_objects.keys(): lowerCAmelCase__ : str = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) lowerCAmelCase__ : int = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): lowerCAmelCase__ : List[Any] = '''base imports''' if key == '''none''' else F"""{key} backend""" errors.append(F"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def __lowerCAmelCase ( ) -> Optional[Any]: lowerCAmelCase__ : Dict = [] for root, _, files in os.walk(UpperCamelCase ): if "__init__.py" in files: lowerCAmelCase__ : Any = os.path.join(UpperCamelCase , '''__init__.py''' ) lowerCAmelCase__ : List[Any] = parse_init(UpperCamelCase ) if objects is not None: lowerCAmelCase__ : Optional[int] = analyze_results(*UpperCamelCase ) if len(UpperCamelCase ) > 0: lowerCAmelCase__ : Tuple = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append('''\n'''.join(UpperCamelCase ) ) if len(UpperCamelCase ) > 0: raise ValueError('''\n\n'''.join(UpperCamelCase ) ) def __lowerCAmelCase ( ) -> Tuple: lowerCAmelCase__ : str = [] for path, directories, files in os.walk(UpperCamelCase ): for folder in directories: # Ignore private modules if folder.startswith('''_''' ): directories.remove(UpperCamelCase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(UpperCamelCase ) / folder).glob('''*.py''' ) ) ) == 0: continue lowerCAmelCase__ : Tuple = str((Path(UpperCamelCase ) / folder).relative_to(UpperCamelCase ) ) lowerCAmelCase__ : Dict = short_path.replace(os.path.sep , '''.''' ) submodules.append(UpperCamelCase ) for fname in files: if fname == "__init__.py": continue lowerCAmelCase__ : 
Union[str, Any] = str((Path(UpperCamelCase ) / fname).relative_to(UpperCamelCase ) ) lowerCAmelCase__ : Tuple = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' ) if len(submodule.split('''.''' ) ) == 1: submodules.append(UpperCamelCase ) return submodules lowerCAmelCase_ = [ """convert_pytorch_checkpoint_to_tf2""", """modeling_flax_pytorch_utils""", """models.esm.openfold_utils""", ] def __lowerCAmelCase ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. from transformers.utils import direct_transformers_import lowerCAmelCase__ : Dict = direct_transformers_import(UpperCamelCase ) lowerCAmelCase__ : int = set(transformers._import_structure.keys() ) # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing # some optional dependencies, they may not have all of them. Thus we read the init to collect all additions and # (potentially re-) add them. with open(os.path.join(UpperCamelCase , '''__init__.py''' ) , '''r''' ) as f: lowerCAmelCase__ : str = f.read() import_structure_keys.update(set(re.findall(R'''import_structure\[\"([^\"]*)\"\]''' , UpperCamelCase ) ) ) lowerCAmelCase__ : Optional[int] = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in import_structure_keys ] if len(UpperCamelCase ) > 0: lowerCAmelCase__ : List[Any] = '''\n'''.join(F"""- {module}""" for module in module_not_registered ) raise ValueError( '''The following submodules are not properly registered in the main init of Transformers:\n''' F"""{list_of_modules}\n""" '''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' ) if __name__ == "__main__": check_all_inits() check_submodules()
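# Illustrative sketch (hypothetical module and class names): the minimal shape
# of an __init__.py that the checks above accept. The keys and objects declared
# in `_import_structure` must mirror, one for one, the imports performed in the
# TYPE_CHECKING block; backend-specific entries would additionally sit behind
# matching try/except OptionalDependencyNotAvailable guards in both halves.
from typing import TYPE_CHECKING

from transformers.utils import _LazyModule

_import_structure = {"configuration_foo": ["FooConfig"]}  # "FooConfig" is made up

if TYPE_CHECKING:
    from .configuration_foo import FooConfig
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)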
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = { """configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""], """feature_extraction_whisper""": ["""WhisperFeatureExtractor"""], """processing_whisper""": ["""WhisperProcessor"""], """tokenization_whisper""": ["""WhisperTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["""WhisperTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """WhisperForConditionalGeneration""", """WhisperModel""", """WhisperPreTrainedModel""", """WhisperForAudioClassification""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWhisperForConditionalGeneration""", """TFWhisperModel""", """TFWhisperPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """FlaxWhisperForConditionalGeneration""", """FlaxWhisperModel""", """FlaxWhisperPreTrainedModel""", """FlaxWhisperForAudioClassification""", ] if TYPE_CHECKING: from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig from .feature_extraction_whisper import WhisperFeatureExtractor from .processing_whisper import WhisperProcessor from .tokenization_whisper import WhisperTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_whisper_fast import WhisperTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_whisper import ( WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, WhisperForAudioClassification, WhisperForConditionalGeneration, WhisperModel, WhisperPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_whisper import ( TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST, TFWhisperForConditionalGeneration, TFWhisperModel, TFWhisperPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_whisper import ( FlaxWhisperForAudioClassification, FlaxWhisperForConditionalGeneration, FlaxWhisperModel, FlaxWhisperPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
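# Usage sketch: because of the _LazyModule registration above, importing one
# symbol from the package loads only the submodule that defines it, so the
# heavy torch/tf/flax backends stay unimported until one of their classes is
# actually requested.
from transformers import WhisperConfig  # triggers only configuration_whisper

config = WhisperConfig()  # default configuration; no modeling backend loaded yet
print(config.d_model)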
import math import os import unittest from transformers import MegatronBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, ) class _lowerCAmelCase : def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=99 , __UpperCAmelCase=64 , __UpperCAmelCase=32 , __UpperCAmelCase=5 , __UpperCAmelCase=4 , __UpperCAmelCase=37 , __UpperCAmelCase="gelu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=512 , __UpperCAmelCase=16 , __UpperCAmelCase=2 , __UpperCAmelCase=0.02 , __UpperCAmelCase=3 , __UpperCAmelCase=4 , __UpperCAmelCase=None , ): lowerCAmelCase__ : Optional[int] = parent lowerCAmelCase__ : Tuple = batch_size lowerCAmelCase__ : Union[str, Any] = seq_length lowerCAmelCase__ : str = is_training lowerCAmelCase__ : Union[str, Any] = use_input_mask lowerCAmelCase__ : List[Any] = use_token_type_ids lowerCAmelCase__ : int = use_labels lowerCAmelCase__ : List[Any] = vocab_size lowerCAmelCase__ : Optional[int] = hidden_size lowerCAmelCase__ : List[str] = embedding_size lowerCAmelCase__ : Optional[int] = num_hidden_layers lowerCAmelCase__ : Optional[int] = num_attention_heads lowerCAmelCase__ : List[str] = intermediate_size lowerCAmelCase__ : Tuple = hidden_act lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob lowerCAmelCase__ : Union[str, Any] = max_position_embeddings lowerCAmelCase__ : List[Any] = type_vocab_size lowerCAmelCase__ : Optional[Any] = type_sequence_label_size lowerCAmelCase__ : List[Any] = initializer_range lowerCAmelCase__ : Optional[Any] = num_labels lowerCAmelCase__ : List[str] = num_choices lowerCAmelCase__ : Any = scope def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCAmelCase__ : str = None if self.use_input_mask: lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) lowerCAmelCase__ : Optional[Any] = None if self.use_token_type_ids: lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Dict = None lowerCAmelCase__ : Optional[int] = None if self.use_labels: lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) lowerCAmelCase__ : Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __magic_name__( self ): return MegatronBertConfig( 
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCAmelCase , initializer_range=self.initializer_range , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = MegatronBertModel(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , token_type_ids=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : List[Any] = MegatronBertForMaskedLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Tuple = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = MegatronBertForCausalLM(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForNextSentencePrediction(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForPreTraining(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : int = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , next_sentence_label=__UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) 
def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = MegatronBertForQuestionAnswering(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , start_positions=__UpperCAmelCase , end_positions=__UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : Union[str, Any] = MegatronBertForSequenceClassification(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_labels lowerCAmelCase__ : str = MegatronBertForTokenClassification(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Dict = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Optional[Any] = self.num_choices lowerCAmelCase__ : Dict = MegatronBertForMultipleChoice(config=__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.eval() lowerCAmelCase__ : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCAmelCase__ : Any = model( __UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase , labels=__UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __magic_name__( self ): lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs() ( ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ( lowerCAmelCase__ ) , ) : Optional[int] = config_and_inputs lowerCAmelCase__ : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ): A__ = ( ( MegatronBertModel, MegatronBertForMaskedLM, MegatronBertForCausalLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, ) if 
is_torch_available() else () ) A__ = ( { 'feature-extraction': MegatronBertModel, 'fill-mask': MegatronBertForMaskedLM, 'question-answering': MegatronBertForQuestionAnswering, 'text-classification': MegatronBertForSequenceClassification, 'text-generation': MegatronBertForCausalLM, 'token-classification': MegatronBertForTokenClassification, 'zero-shot': MegatronBertForSequenceClassification, } if is_torch_available() else {} ) A__ = True # test_resize_embeddings = False A__ = False def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): lowerCAmelCase__ : List[Any] = super()._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase ) if return_labels: if model_class in get_values(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=__UpperCAmelCase ) return inputs_dict def __magic_name__( self ): lowerCAmelCase__ : str = MegatronBertModelTester(self ) lowerCAmelCase__ : Dict = ConfigTester(self , config_class=__UpperCAmelCase , hidden_size=37 ) def __magic_name__( self ): self.config_tester.run_common_tests() def __magic_name__( self ): lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_model(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_pretraining(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_question_answering(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_megatron_bert_for_token_classification(*__UpperCAmelCase ) def __lowerCAmelCase ( UpperCamelCase ) -> Optional[int]: return torch.tensor( UpperCamelCase , dtype=torch.long , device=UpperCamelCase , ) lowerCAmelCase_ = 1e-4 @require_torch @require_sentencepiece @require_tokenizers class _lowerCAmelCase ( unittest.TestCase ): @slow @unittest.skip('''Model is not available.''' ) def __magic_name__( self ): lowerCAmelCase__ : int = '''nvidia/megatron-bert-uncased-345m''' if "MYDIR" in os.environ: lowerCAmelCase__ : Union[str, Any] = os.path.join(os.environ['''MYDIR'''] , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = MegatronBertModel.from_pretrained(__UpperCAmelCase ) model.to(__UpperCAmelCase ) model.half() lowerCAmelCase__ : Optional[int] = _long_tensor([[101, 
7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0] lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj] lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj] lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
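# Sketch of an equivalent, vectorised form of the nested isclose loop above
# (assumes the `output` tensor from the integration test is still in scope):
# compare the 3x3 slice of the hidden states against the expected values in a
# single call instead of element by element. The .float().cpu() handles the
# half-precision, possibly on-device tensor produced by the test.
import torch

expected = torch.tensor(
    [[-0.6040, -0.2517, -0.1025], [0.3420, -0.6758, -0.0017], [-0.1089, -0.1990, 0.5728]]
)
assert torch.allclose(output[0, :3, :3].float().cpu(), expected, rtol=1e-4, atol=1e-4)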
import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self , __UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = 3 lowerCAmelCase__ : Tuple = 250 lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length return input_ids, scores def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 ) lowerCAmelCase__ : List[str] = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 ) lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def __magic_name__( self ): lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 ) lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(__UpperCAmelCase ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(__UpperCAmelCase ) , 1 )
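# Usage sketch (downloads gpt2 from the Hugging Face Hub): the same criteria
# objects exercised above plug directly into `generate`, stopping decoding once
# the sequence reaches 20 tokens or 2 seconds of wall-clock time have elapsed,
# whichever comes first.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import MaxLengthCriteria, MaxTimeCriteria, StoppingCriteriaList

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok("Hello, my name is", return_tensors="pt")

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=2.0)])
print(tok.decode(model.generate(**inputs, stopping_criteria=criteria)[0]))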
import warnings from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""", # See all BART models at https://huggingface.co/models?filter=bart } class _lowerCAmelCase ( _lowercase ): A__ = 'bart' A__ = ['past_key_values'] A__ = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self , __UpperCAmelCase=5_0265 , __UpperCAmelCase=1024 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=4096 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=1024 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = vocab_size lowerCAmelCase__ : Optional[Any] = max_position_embeddings lowerCAmelCase__ : int = d_model lowerCAmelCase__ : str = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Dict = encoder_attention_heads lowerCAmelCase__ : Union[str, Any] = decoder_ffn_dim lowerCAmelCase__ : Union[str, Any] = decoder_layers lowerCAmelCase__ : Any = decoder_attention_heads lowerCAmelCase__ : Tuple = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] = activation_function lowerCAmelCase__ : Union[str, Any] = init_std lowerCAmelCase__ : Union[str, Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Optional[int] = classifier_dropout lowerCAmelCase__ : str = use_cache lowerCAmelCase__ : int = encoder_layers lowerCAmelCase__ : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , ) # ensure backward compatibility for BART CNN models if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , __UpperCAmelCase ): lowerCAmelCase__ : str = self.bos_token_id warnings.warn( f"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. 
""" '''The config can simply be saved and uploaded again to be fixed.''' ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Tuple = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ : Any = {0: '''batch'''} lowerCAmelCase__ : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} else: lowerCAmelCase__ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 1: '''decoder_sequence'''} if self.use_past: self.fill_with_past_key_values_(__UpperCAmelCase , direction='''inputs''' ) elif self.task == "causal-lm": # TODO: figure this case out. lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ] ) if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : int = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Union[str, Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} else: lowerCAmelCase__ : List[str] = OrderedDict( [ ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}), ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}), ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}), ] ) return common_inputs @property def __magic_name__( self ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Dict = super().outputs else: lowerCAmelCase__ : Any = super(__UpperCAmelCase , self ).outputs if self.use_past: lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.num_layers for i in range(__UpperCAmelCase ): lowerCAmelCase__ : List[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} lowerCAmelCase__ : Optional[Any] = {0: '''batch''', 2: '''past_sequence + sequence'''} return common_outputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) # Generate decoder inputs lowerCAmelCase__ : int = seq_length if not self.use_past else 1 lowerCAmelCase__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()} lowerCAmelCase__ : str = dict(**__UpperCAmelCase , **__UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = common_inputs['''input_ids'''].shape lowerCAmelCase__ : List[str] = common_inputs['''decoder_input_ids'''].shape[1] lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.num_attention_heads lowerCAmelCase__ : Any = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : 
List[Any] = decoder_seq_length + 3 lowerCAmelCase__ : Any = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) lowerCAmelCase__ : Any = torch.cat( [common_inputs['''decoder_attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[str] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered lowerCAmelCase__ , lowerCAmelCase__ : Any = self.num_layers lowerCAmelCase__ : Union[str, Any] = min(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = max(__UpperCAmelCase , __UpperCAmelCase ) - min_num_layers lowerCAmelCase__ : Dict = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder''' for _ in range(__UpperCAmelCase ): common_inputs["past_key_values"].append( ( torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase ), ) ) # TODO: test this. lowerCAmelCase__ : Any = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape for _ in range(__UpperCAmelCase , __UpperCAmelCase ): common_inputs["past_key_values"].append((torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): lowerCAmelCase__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch lowerCAmelCase__ , lowerCAmelCase__ : Any = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values lowerCAmelCase__ : List[str] = seqlen + 2 lowerCAmelCase__ , lowerCAmelCase__ : int = self.num_layers lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.num_attention_heads lowerCAmelCase__ : Optional[Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) lowerCAmelCase__ : Optional[Any] = common_inputs['''attention_mask'''].dtype lowerCAmelCase__ : List[Any] = torch.cat( [common_inputs['''attention_mask'''], torch.ones(__UpperCAmelCase , __UpperCAmelCase , dtype=__UpperCAmelCase )] , dim=1 ) lowerCAmelCase__ : List[Any] = [ (torch.zeros(__UpperCAmelCase ), torch.zeros(__UpperCAmelCase )) for _ in range(__UpperCAmelCase ) ] return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowerCAmelCase__ : Tuple = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowerCAmelCase__ : Optional[Any] = tokenizer.num_special_tokens_to_add(__UpperCAmelCase ) lowerCAmelCase__ : int = compute_effective_axis_dimension( __UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__UpperCAmelCase ) # Generate dummy inputs according to compute batch and sequence lowerCAmelCase__ : Optional[int] = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size lowerCAmelCase__ : str = dict(tokenizer(__UpperCAmelCase , return_tensors=__UpperCAmelCase ) ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) elif self.task == "causal-lm": lowerCAmelCase__ : List[str] = self._generate_dummy_inputs_for_causal_lm( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) else: lowerCAmelCase__ : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): if self.task in ["default", "seq2seq-lm"]: lowerCAmelCase__ : Any = super()._flatten_past_key_values_(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) else: lowerCAmelCase__ : Dict = super(__UpperCAmelCase , self )._flatten_past_key_values_( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
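# Export sketch (assumes PyTorch and the `onnx` package are installed, and uses
# the upstream name BartOnnxConfig for the OnnxSeqaSeqConfigWithPast subclass
# defined above): drive the config through the transformers.onnx export helper
# to produce a graph for the default seq2seq task, with dynamic axes and dummy
# inputs generated exactly as the methods above define them.
from pathlib import Path

from transformers import AutoTokenizer, BartModel
from transformers.models.bart import BartOnnxConfig
from transformers.onnx import export

tokenizer = AutoTokenizer.from_pretrained("facebook/bart-base")
model = BartModel.from_pretrained("facebook/bart-base")
onnx_config = BartOnnxConfig(model.config, task="default")

export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("bart.onnx"))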
7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase )[0] lowerCAmelCase__ : List[Any] = torch.Size((1, 9, 1024) ) self.assertEqual(output.shape , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728] for ii in range(3 ): for jj in range(3 ): lowerCAmelCase__ : Union[str, Any] = output[0, ii, jj] lowerCAmelCase__ : Optional[Any] = expected[3 * ii + jj] lowerCAmelCase__ : List[str] = '''ii={} jj={} a={} b={}'''.format(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) self.assertTrue(math.isclose(__UpperCAmelCase , __UpperCAmelCase , rel_tol=__UpperCAmelCase , abs_tol=__UpperCAmelCase ) , msg=__UpperCAmelCase )
678
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class _lowerCAmelCase ( _lowercase ):
    A__ = 'sew-d'

    def __init__( self , __UpperCAmelCase=32 , __UpperCAmelCase=768 , __UpperCAmelCase=12 , __UpperCAmelCase=12 , __UpperCAmelCase=3072 , __UpperCAmelCase=2 , __UpperCAmelCase=512 , __UpperCAmelCase=256 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=("p2c", "c2p") , __UpperCAmelCase="layer_norm" , __UpperCAmelCase="gelu_python" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1e-5 , __UpperCAmelCase="group" , __UpperCAmelCase="gelu" , __UpperCAmelCase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __UpperCAmelCase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __UpperCAmelCase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __UpperCAmelCase=False , __UpperCAmelCase=128 , __UpperCAmelCase=16 , __UpperCAmelCase=True , __UpperCAmelCase=0.05 , __UpperCAmelCase=10 , __UpperCAmelCase=2 , __UpperCAmelCase=0.0 , __UpperCAmelCase=10 , __UpperCAmelCase=0 , __UpperCAmelCase="mean" , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=256 , __UpperCAmelCase=0 , __UpperCAmelCase=1 , __UpperCAmelCase=2 , **__UpperCAmelCase , ):
        super().__init__(**__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
        lowerCAmelCase__ : Optional[int] = hidden_size
        lowerCAmelCase__ : Optional[int] = feat_extract_norm
        lowerCAmelCase__ : str = feat_extract_activation
        lowerCAmelCase__ : int = list(__UpperCAmelCase )
        lowerCAmelCase__ : int = list(__UpperCAmelCase )
        lowerCAmelCase__ : Any = list(__UpperCAmelCase )
        lowerCAmelCase__ : int = conv_bias
        lowerCAmelCase__ : List[Any] = num_conv_pos_embeddings
        lowerCAmelCase__ : Optional[int] = num_conv_pos_embedding_groups
        lowerCAmelCase__ : int = len(self.conv_dim )
        lowerCAmelCase__ : Union[str, Any] = num_hidden_layers
        lowerCAmelCase__ : Any = intermediate_size
        lowerCAmelCase__ : int = squeeze_factor
        lowerCAmelCase__ : int = max_position_embeddings
        lowerCAmelCase__ : Any = position_buckets
        lowerCAmelCase__ : Optional[int] = share_att_key
        lowerCAmelCase__ : Tuple = relative_attention
        lowerCAmelCase__ : Optional[int] = norm_rel_ebd
        lowerCAmelCase__ : Tuple = list(__UpperCAmelCase )
        lowerCAmelCase__ : List[Any] = hidden_act
        lowerCAmelCase__ : Any = num_attention_heads
        lowerCAmelCase__ : Optional[int] = hidden_dropout
        lowerCAmelCase__ : Union[str, Any] = attention_dropout
        lowerCAmelCase__ : str = activation_dropout
        lowerCAmelCase__ : List[Any] = feat_proj_dropout
        lowerCAmelCase__ : Any = final_dropout
        lowerCAmelCase__ : Optional[int] = layer_norm_eps
        lowerCAmelCase__ : List[str] = feature_layer_norm_eps
        lowerCAmelCase__ : Tuple = initializer_range
        lowerCAmelCase__ : Tuple = vocab_size

        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                '''Configuration for convolutional layers is incorrect.'''
                '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
                f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        lowerCAmelCase__ : Tuple = apply_spec_augment
        lowerCAmelCase__ : List[str] = mask_time_prob
        lowerCAmelCase__ : int = mask_time_length
        lowerCAmelCase__ : int = mask_time_min_masks
        lowerCAmelCase__ : Optional[int] = mask_feature_prob
        lowerCAmelCase__ : int = mask_feature_length
        lowerCAmelCase__ : int = mask_feature_min_masks

        # ctc loss
        lowerCAmelCase__ : Optional[Any] = ctc_loss_reduction
        lowerCAmelCase__ : Any = ctc_zero_infinity

        # sequence classification
        lowerCAmelCase__ : Tuple = use_weighted_layer_sum
        lowerCAmelCase__ : Dict = classifier_proj_size

    @property
    def __magic_name__( self ):
        return functools.reduce(operator.mul , self.conv_stride , 1 )
678
1
import collections
import inspect
import unittest
from typing import Dict, List, Tuple

from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available

from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel


class _lowerCAmelCase :
    def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=32 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=16 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=10 , __UpperCAmelCase=8 , __UpperCAmelCase=["stage1", "stage2", "stage3"] , __UpperCAmelCase=[1, 2, 3] , ):
        lowerCAmelCase__ : List[Any] = parent
        lowerCAmelCase__ : Optional[Any] = batch_size
        lowerCAmelCase__ : Union[str, Any] = image_size
        lowerCAmelCase__ : Union[str, Any] = patch_size
        lowerCAmelCase__ : Any = num_channels
        lowerCAmelCase__ : List[str] = embed_dim
        lowerCAmelCase__ : List[str] = depths
        lowerCAmelCase__ : str = num_heads
        lowerCAmelCase__ : Any = window_size
        lowerCAmelCase__ : Union[str, Any] = mlp_ratio
        lowerCAmelCase__ : Union[str, Any] = qkv_bias
        lowerCAmelCase__ : List[str] = hidden_dropout_prob
        lowerCAmelCase__ : List[Any] = attention_probs_dropout_prob
        lowerCAmelCase__ : str = drop_path_rate
        lowerCAmelCase__ : int = hidden_act
        lowerCAmelCase__ : Any = use_absolute_embeddings
        lowerCAmelCase__ : Union[str, Any] = patch_norm
        lowerCAmelCase__ : Union[str, Any] = layer_norm_eps
        lowerCAmelCase__ : int = initializer_range
        lowerCAmelCase__ : Tuple = is_training
        lowerCAmelCase__ : List[Any] = scope
        lowerCAmelCase__ : int = use_labels
        lowerCAmelCase__ : str = type_sequence_label_size
        lowerCAmelCase__ : List[Any] = encoder_stride
        lowerCAmelCase__ : Tuple = out_features
        lowerCAmelCase__ : Dict = out_indices

    def __magic_name__( self ):
        lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase__ : str = None
        if self.use_labels:
            lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCAmelCase__ : Dict = self.get_config()
        return config, pixel_values, labels

    def __magic_name__( self ):
        return MaskFormerSwinConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )

    def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        lowerCAmelCase__ : Union[str, Any] = MaskFormerSwinModel(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        lowerCAmelCase__ : str = model(__UpperCAmelCase )
        lowerCAmelCase__ : Any = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCAmelCase__ : List[str] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        lowerCAmelCase__ : int = MaskFormerSwinBackbone(config=__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        lowerCAmelCase__ : Dict = model(__UpperCAmelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , [16, 32, 64] )
        # verify ValueError
        with self.parent.assertRaises(__UpperCAmelCase ):
            lowerCAmelCase__ : List[Any] = ['''stem''']
            lowerCAmelCase__ : Any = MaskFormerSwinBackbone(config=__UpperCAmelCase )

    def __magic_name__( self ):
        lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = config_and_inputs
        lowerCAmelCase__ : str = {'''pixel_values''': pixel_values}
        return config, inputs_dict


@require_torch
class _lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
    A__ = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    A__ = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    A__ = False
    A__ = False
    A__ = False
    A__ = False
    A__ = False

    def __magic_name__( self ):
        lowerCAmelCase__ : List[str] = MaskFormerSwinModelTester(self )
        lowerCAmelCase__ : Optional[int] = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=37 )

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
            ''' `nn.DataParallel`'''
        ) )
    def __magic_name__( self ):
        pass

    def __magic_name__( self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def __magic_name__( self ):
        return

    def __magic_name__( self ):
        lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCAmelCase )

    def __magic_name__( self ):
        lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*__UpperCAmelCase )

    @unittest.skip('''Swin does not use inputs_embeds''' )
    def __magic_name__( self ):
        pass

    @unittest.skip('''Swin does not support feedforward chunking''' )
    def __magic_name__( self ):
        pass

    def __magic_name__( self ):
        lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : List[str] = model_class(__UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCAmelCase__ : Optional[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )

    def __magic_name__( self ):
        lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Union[str, Any] = model_class(__UpperCAmelCase )
            lowerCAmelCase__ : Union[str, Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : List[str] = [*signature.parameters.keys()]
            lowerCAmelCase__ : Any = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , __UpperCAmelCase )

    @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
    def __magic_name__( self ):
        pass

    @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
    def __magic_name__( self ):
        pass

    def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
        lowerCAmelCase__ : List[Any] = model_class(__UpperCAmelCase )
        model.to(__UpperCAmelCase )
        model.eval()
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
        lowerCAmelCase__ : Any = outputs.hidden_states
        lowerCAmelCase__ : int = getattr( self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
        # Swin has a different seq_length
        lowerCAmelCase__ : Optional[int] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ : Tuple = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def __magic_name__( self ):
        lowerCAmelCase__ , lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Any = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Optional[Any] = True
            self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : List[str] = True
            self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )

    def __magic_name__( self ):
        lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Optional[Any] = 3
        lowerCAmelCase__ : Dict = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCAmelCase__ : Union[str, Any] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCAmelCase__ : Optional[int] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Tuple = True
            self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : Dict = True
            self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )

    @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
    def __magic_name__( self ):
        pass

    @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def __magic_name__( self ):
        pass

    @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def __magic_name__( self ):
        pass

    def __magic_name__( self ):
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(__UpperCAmelCase ):
            lowerCAmelCase__ : List[str] = 0
            return t

        def check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase={} ):
            with torch.no_grad():
                lowerCAmelCase__ : Optional[int] = model(**__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase )
                lowerCAmelCase__ : List[Any] = model(**__UpperCAmelCase , return_dict=__UpperCAmelCase , **__UpperCAmelCase ).to_tuple()

            def recursive_check(__UpperCAmelCase , __UpperCAmelCase ):
                if isinstance(__UpperCAmelCase , (List, Tuple) ):
                    for tuple_iterable_value, dict_iterable_value in zip(__UpperCAmelCase , __UpperCAmelCase ):
                        recursive_check(__UpperCAmelCase , __UpperCAmelCase )
                elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                    for tuple_iterable_value, dict_iterable_value in zip( tuple_object.values() , dict_object.values() ):
                        recursive_check(__UpperCAmelCase , __UpperCAmelCase )
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose( set_nan_tensor_to_zero(__UpperCAmelCase ) , set_nan_tensor_to_zero(__UpperCAmelCase ) , atol=1e-5 ) ,
                        msg=(
                            '''Tuple and dict output are not equal. Difference:'''
                            f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
                            f""" {torch.isnan(__UpperCAmelCase ).any()} and `inf`: {torch.isinf(__UpperCAmelCase )}. Dict has"""
                            f""" `nan`: {torch.isnan(__UpperCAmelCase ).any()} and `inf`: {torch.isinf(__UpperCAmelCase )}."""
                        ) , )

            recursive_check(__UpperCAmelCase , __UpperCAmelCase )

        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Any = model_class(__UpperCAmelCase )
            model.to(__UpperCAmelCase )
            model.eval()
            lowerCAmelCase__ : List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Union[str, Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            lowerCAmelCase__ : List[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : List[str] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            lowerCAmelCase__ : Optional[Any] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase )
            check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {'''output_hidden_states''': True} )
            lowerCAmelCase__ : Optional[int] = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            lowerCAmelCase__ : Dict = self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase , return_labels=__UpperCAmelCase )
            check_equivalence(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , {'''output_hidden_states''': True} )


@require_torch
class _lowerCAmelCase ( unittest.TestCase , _lowercase ):
    A__ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    A__ = MaskFormerSwinConfig

    def __magic_name__( self ):
        lowerCAmelCase__ : List[str] = MaskFormerSwinModelTester(self )

    def __magic_name__( self ):
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : List[str] = inputs_dict['''pixel_values'''].shape[0]
        for backbone_class in self.all_model_classes:
            lowerCAmelCase__ : str = backbone_class(__UpperCAmelCase )
            backbone.to(__UpperCAmelCase )
            backbone.eval()
            lowerCAmelCase__ : Dict = backbone(**__UpperCAmelCase )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , __UpperCAmelCase )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            lowerCAmelCase__ : Dict = backbone(**__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                lowerCAmelCase__ : int = backbone(**__UpperCAmelCase , output_attentions=__UpperCAmelCase )
                self.assertIsNotNone(outputs.attentions )
678
import unittest

from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


lowerCAmelCase_ = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")


@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
    A__ = PegasusTokenizer
    A__ = PegasusTokenizerFast
    A__ = True
    A__ = True

    def __magic_name__( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase__ : Union[str, Any] = PegasusTokenizer(__UpperCAmelCase )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def __magic_name__( self ):
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )

    def __magic_name__( self , **__UpperCAmelCase ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def __magic_name__( self , __UpperCAmelCase ):
        return ("This is a test", "This is a test")

    def __magic_name__( self ):
        lowerCAmelCase__ : Optional[Any] = '''</s>'''
        lowerCAmelCase__ : Optional[int] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )

    def __magic_name__( self ):
        lowerCAmelCase__ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '''<pad>''' )
        self.assertEqual(vocab_keys[1] , '''</s>''' )
        self.assertEqual(vocab_keys[-1] , '''v''' )
        self.assertEqual(len(__UpperCAmelCase ) , 1103 )

    def __magic_name__( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )

    def __magic_name__( self ):
        lowerCAmelCase__ : int = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase__ : Tuple = self.tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase__ : int = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        lowerCAmelCase__ : Any = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        lowerCAmelCase__ : Dict = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    def __magic_name__( self ):
        lowerCAmelCase__ : Any = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        lowerCAmelCase__ : List[str] = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        lowerCAmelCase__ : Tuple = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        lowerCAmelCase__ : Tuple = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    def __magic_name__( self ):
        lowerCAmelCase__ : Dict = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        lowerCAmelCase__ : str = '''To ensure a smooth flow of bank resolutions.'''
        lowerCAmelCase__ : int = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        lowerCAmelCase__ : List[Any] = tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def __magic_name__( self ):
        lowerCAmelCase__ : Optional[int] = ['''This is going to be way too long.''' * 150, '''short example''']
        lowerCAmelCase__ : List[str] = ['''not super long but more than 5 tokens''', '''tiny''']
        lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
        lowerCAmelCase__ : Optional[int] = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(__UpperCAmelCase ) == 2  # input_ids, attention_mask.

    @slow
    def __magic_name__( self ):
        # fmt: off
        lowerCAmelCase__ : Optional[int] = {'''input_ids''': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCAmelCase ,
            model_name='''google/bigbird-pegasus-large-arxiv''' ,
            revision='''ba85d0851d708441f91440d509690f1ab6353415''' ,
        )


@require_sentencepiece
@require_tokenizers
class _lowerCAmelCase ( _lowercase , unittest.TestCase ):
    A__ = PegasusTokenizer
    A__ = PegasusTokenizerFast
    A__ = True
    A__ = True

    def __magic_name__( self ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        lowerCAmelCase__ : List[Any] = PegasusTokenizer(__UpperCAmelCase , offset=0 , mask_token_sent=__UpperCAmelCase , mask_token='''[MASK]''' )
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def __magic_name__( self ):
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )

    def __magic_name__( self , **__UpperCAmelCase ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase )

    def __magic_name__( self , __UpperCAmelCase ):
        return ("This is a test", "This is a test")

    def __magic_name__( self ):
        lowerCAmelCase__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase__ : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
        lowerCAmelCase__ : str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        lowerCAmelCase__ : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        lowerCAmelCase__ : int = py_tokenizer([raw_input_str] , return_tensors=__UpperCAmelCase , add_special_tokens=__UpperCAmelCase ).input_ids[0]
        self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )

    @require_torch
    def __magic_name__( self ):
        lowerCAmelCase__ : Optional[Any] = ['''This is going to be way too long.''' * 1000, '''short example''']
        lowerCAmelCase__ : int = ['''not super long but more than 5 tokens''', '''tiny''']
        lowerCAmelCase__ : Tuple = self._large_tokenizer(__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
        lowerCAmelCase__ : Tuple = self._large_tokenizer( text_target=__UpperCAmelCase , max_length=5 , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors='''pt''' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(__UpperCAmelCase ) == 2  # input_ids, attention_mask.

    def __magic_name__( self ):
        lowerCAmelCase__ : List[str] = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        lowerCAmelCase__ : Union[str, Any] = self._large_tokenizer(__UpperCAmelCase ).input_ids
        self.assertListEqual(
            __UpperCAmelCase ,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] ,
        )
678
1
import os
from argparse import ArgumentParser
from typing import List

import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node


lowerCAmelCase_ = 4
lowerCAmelCase_ = 3


class _lowerCAmelCase ( _lowercase ):
    pass


def __lowerCAmelCase ( UpperCamelCase ) -> List[Any]:
    for shard in shards:
        for i in range(UpperCamelCase ):
            yield {"i": i, "shard": shard}


def __lowerCAmelCase ( ) -> Optional[Any]:
    lowerCAmelCase__ : List[Any] = int(os.environ['''RANK'''] )
    lowerCAmelCase__ : Optional[Any] = int(os.environ['''WORLD_SIZE'''] )
    lowerCAmelCase__ : List[str] = ArgumentParser()
    parser.add_argument('''--streaming''' , type=UpperCamelCase )
    parser.add_argument('''--local_rank''' , type=UpperCamelCase )
    parser.add_argument('''--num_workers''' , type=UpperCamelCase , default=0 )
    lowerCAmelCase__ : List[Any] = parser.parse_args()
    lowerCAmelCase__ : Union[str, Any] = args.streaming
    lowerCAmelCase__ : List[str] = args.num_workers
    lowerCAmelCase__ : Union[str, Any] = {'''shards''': [F"""shard_{shard_idx}""" for shard_idx in range(UpperCamelCase )]}
    lowerCAmelCase__ : List[Any] = IterableDataset.from_generator(UpperCamelCase , gen_kwargs=UpperCamelCase )
    if not streaming:
        lowerCAmelCase__ : int = Dataset.from_list(list(UpperCamelCase ) )
    lowerCAmelCase__ : Optional[int] = split_dataset_by_node(UpperCamelCase , rank=UpperCamelCase , world_size=UpperCamelCase )
    lowerCAmelCase__ : Optional[int] = torch.utils.data.DataLoader(UpperCamelCase , num_workers=UpperCamelCase )
    lowerCAmelCase__ : Optional[int] = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    lowerCAmelCase__ : List[str] = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size) )
    lowerCAmelCase__ : Union[str, Any] = sum(1 for _ in dataloader )
    if local_size != expected_local_size:
        raise FailedTestError(F"""local_size {local_size} != expected_local_size {expected_local_size}""" )


if __name__ == "__main__":
    main()
678
from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    """naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class _lowerCAmelCase ( _lowercase ):
    A__ = 'donut-swin'

    A__ = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[3, 6, 12, 24] , __UpperCAmelCase=7 , __UpperCAmelCase=4.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , **__UpperCAmelCase , ):
        super().__init__(**__UpperCAmelCase )
        lowerCAmelCase__ : Union[str, Any] = image_size
        lowerCAmelCase__ : List[str] = patch_size
        lowerCAmelCase__ : int = num_channels
        lowerCAmelCase__ : Optional[Any] = embed_dim
        lowerCAmelCase__ : int = depths
        lowerCAmelCase__ : Dict = len(__UpperCAmelCase )
        lowerCAmelCase__ : Union[str, Any] = num_heads
        lowerCAmelCase__ : Dict = window_size
        lowerCAmelCase__ : str = mlp_ratio
        lowerCAmelCase__ : Optional[int] = qkv_bias
        lowerCAmelCase__ : Any = hidden_dropout_prob
        lowerCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
        lowerCAmelCase__ : List[str] = drop_path_rate
        lowerCAmelCase__ : Tuple = hidden_act
        lowerCAmelCase__ : List[str] = use_absolute_embeddings
        lowerCAmelCase__ : Dict = layer_norm_eps
        lowerCAmelCase__ : Any = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        lowerCAmelCase__ : List[Any] = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
678
1
def __lowerCAmelCase ( UpperCamelCase ) -> List[str]:
    lowerCAmelCase__ : List[str] = []
    lowerCAmelCase__ : List[str] = []
    lowerCAmelCase__ : Dict = {
        '''^''': 3,
        '''*''': 2,
        '''/''': 2,
        '''%''': 2,
        '''+''': 1,
        '''-''': 1,
    }  # Priority of each operator
    lowerCAmelCase__ : Tuple = len(UpperCamelCase ) if (len(UpperCamelCase ) > 7) else 7
    # Print table header for output
    print(
        '''Symbol'''.center(8 ) ,
        '''Stack'''.center(UpperCamelCase ) ,
        '''Postfix'''.center(UpperCamelCase ) ,
        sep=''' | ''' ,
    )
    print('''-''' * (print_width * 3 + 7) )
    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(UpperCamelCase )  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(UpperCamelCase )  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop() )  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(UpperCamelCase ) == 0:
                stack.append(UpperCamelCase )  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(UpperCamelCase ) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop() )  # pop stack & add to Postfix
                stack.append(UpperCamelCase )  # push x to stack
        print(
            x.center(8 ) ,
            (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) ,
            (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) ,
            sep=''' | ''' ,
        )  # Output in tabular format
    while len(UpperCamelCase ) > 0:  # while stack is not empty
        post_fix.append(stack.pop() )  # pop stack & add to Postfix
        print(
            ''' '''.center(8 ) ,
            (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) ,
            (''''''.join(UpperCamelCase )).ljust(UpperCamelCase ) ,
            sep=''' | ''' ,
        )  # Output in tabular format
    return "".join(UpperCamelCase )  # return Postfix as str


def __lowerCAmelCase ( UpperCamelCase ) -> Optional[Any]:
    lowerCAmelCase__ : List[Any] = list(infix[::-1] )  # reverse the infix equation
    for i in range(len(UpperCamelCase ) ):
        if infix[i] == "(":
            lowerCAmelCase__ : Tuple = ''')'''  # change "(" to ")"
        elif infix[i] == ")":
            lowerCAmelCase__ : List[str] = '''('''  # change ")" to "("
    return (infix_2_postfix(''''''.join(UpperCamelCase ) ))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    lowerCAmelCase_ = input("""\nEnter an Infix Equation = """)  # Input an Infix equation
    lowerCAmelCase_ = """""".join(Infix.split())  # Remove spaces from the input
    print("""\n\t""", Infix, """(Infix) -> """, infix_2_prefix(Infix), """(Prefix)""")
678
lowerCAmelCase_ = {
    """meter""": """m""",
    """kilometer""": """km""",
    """megametre""": """Mm""",
    """gigametre""": """Gm""",
    """terametre""": """Tm""",
    """petametre""": """Pm""",
    """exametre""": """Em""",
    """zettametre""": """Zm""",
    """yottametre""": """Ym""",
}
# Exponent of the factor(meter)
lowerCAmelCase_ = {
    """m""": 0,
    """km""": 3,
    """Mm""": 6,
    """Gm""": 9,
    """Tm""": 12,
    """Pm""": 15,
    """Em""": 18,
    """Zm""": 21,
    """Ym""": 24,
}


def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
    lowerCAmelCase__ : List[str] = from_type.lower().strip('''s''' )
    lowerCAmelCase__ : List[str] = to_type.lower().strip('''s''' )
    lowerCAmelCase__ : str = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
    lowerCAmelCase__ : Any = UNIT_SYMBOL.get(UpperCamelCase , UpperCamelCase )
    if from_sanitized not in METRIC_CONVERSION:
        lowerCAmelCase__ : Tuple = (
            F"""Invalid 'from_type' value: {from_type!r}.\n"""
            F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
        )
        raise ValueError(UpperCamelCase )
    if to_sanitized not in METRIC_CONVERSION:
        lowerCAmelCase__ : List[Any] = (
            F"""Invalid 'to_type' value: {to_type!r}.\n"""
            F"""Conversion abbreviations are: {", ".join(UpperCamelCase )}"""
        )
        raise ValueError(UpperCamelCase )
    lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[from_sanitized]
    lowerCAmelCase__ : Optional[Any] = METRIC_CONVERSION[to_sanitized]
    lowerCAmelCase__ : int = 1
    if from_exponent > to_exponent:
        lowerCAmelCase__ : List[str] = from_exponent - to_exponent
    else:
        lowerCAmelCase__ : Dict = -(to_exponent - from_exponent)
    return value * pow(10 , UpperCamelCase )


if __name__ == "__main__":
    from doctest import testmod

    testmod()
678
1
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode

import transformers
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    ViTImageProcessor,
    ViTMAEConfig,
    ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


lowerCAmelCase_ = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")

require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")


@dataclass
class _lowerCAmelCase :
    A__ = field(
        default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
    A__ = field(
        default=_lowercase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
    A__ = field(
        default=_lowercase , metadata={'help': 'The column name of the images in the files.'} )
    A__ = field(default=_lowercase , metadata={'help': 'A folder containing the training data.'} )
    A__ = field(default=_lowercase , metadata={'help': 'A folder containing the validation data.'} )
    A__ = field(
        default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
    A__ = field(
        default=_lowercase ,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        } , )
    A__ = field(
        default=_lowercase ,
        metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        } , )

    def __magic_name__( self ):
        lowerCAmelCase__ : str = {}
        if self.train_dir is not None:
            lowerCAmelCase__ : Optional[Any] = self.train_dir
        if self.validation_dir is not None:
            lowerCAmelCase__ : Any = self.validation_dir
        lowerCAmelCase__ : Dict = data_files if data_files else None


@dataclass
class _lowerCAmelCase :
    A__ = field(
        default=_lowercase ,
        metadata={
            'help': (
                'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
            )
        } , )
    A__ = field(
        default=_lowercase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
    A__ = field(
        default=_lowercase ,
        metadata={
            'help': (
                'Override some existing default config settings when a model is trained from scratch. Example: '
                'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
            )
        } , )
    A__ = field(
        default=_lowercase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
    A__ = field(
        default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
    A__ = field(default=_lowercase , metadata={'help': 'Name or path of preprocessor config.'} )
    A__ = field(
        default=_lowercase ,
        metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        } , )
    A__ = field(
        default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
    A__ = field(
        default=_lowercase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )


@dataclass
class _lowerCAmelCase ( _lowercase ):
    A__ = field(
        default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )


def __lowerCAmelCase ( UpperCamelCase ) -> Any:
    lowerCAmelCase__ : str = torch.stack([example['''pixel_values'''] for example in examples] )
    return {"pixel_values": pixel_values}


def __lowerCAmelCase ( ) -> Tuple:
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    lowerCAmelCase__ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('''run_mae''' , UpperCamelCase , UpperCamelCase )

    # Setup logging
    logging.basicConfig(
        format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' ,
        datefmt='''%m/%d/%Y %H:%M:%S''' ,
        handlers=[logging.StreamHandler(sys.stdout )] , )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    lowerCAmelCase__ : Tuple = training_args.get_process_log_level()
    logger.setLevel(UpperCamelCase )
    transformers.utils.logging.set_verbosity(UpperCamelCase )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
    logger.info(F"""Training/evaluation parameters {training_args}""" )

    # Detecting last checkpoint.
    lowerCAmelCase__ : Any = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        lowerCAmelCase__ : int = get_last_checkpoint(training_args.output_dir )
        if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
            raise ValueError(
                F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
                '''Use --overwrite_output_dir to overcome.''' )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
                '''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )

    # Initialize our dataset.
    lowerCAmelCase__ : int = load_dataset(
        data_args.dataset_name ,
        data_args.dataset_config_name ,
        data_files=data_args.data_files ,
        cache_dir=model_args.cache_dir ,
        use_auth_token=True if model_args.use_auth_token else None , )

    # If we don't have a validation split, split off a percentage of train as validation.
    lowerCAmelCase__ : Tuple = None if '''validation''' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split , UpperCamelCase ) and data_args.train_val_split > 0.0:
        lowerCAmelCase__ : int = ds['''train'''].train_test_split(data_args.train_val_split )
        lowerCAmelCase__ : str = split['''train''']
        lowerCAmelCase__ : List[str] = split['''test''']

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    lowerCAmelCase__ : Dict = {
        '''cache_dir''': model_args.cache_dir,
        '''revision''': model_args.model_revision,
        '''use_auth_token''': True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        lowerCAmelCase__ : List[str] = ViTMAEConfig.from_pretrained(model_args.config_name , **UpperCamelCase )
    elif model_args.model_name_or_path:
        lowerCAmelCase__ : str = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **UpperCamelCase )
    else:
        lowerCAmelCase__ : List[str] = ViTMAEConfig()
        logger.warning('''You are instantiating a new config instance from scratch.''' )
        if model_args.config_overrides is not None:
            logger.info(F"""Overriding config: {model_args.config_overrides}""" )
            config.update_from_string(model_args.config_overrides )
            logger.info(F"""New config: {config}""" )

    # adapt config
    config.update(
        {
            '''mask_ratio''': model_args.mask_ratio,
            '''norm_pix_loss''': model_args.norm_pix_loss,
        } )

    # create image processor
    if model_args.image_processor_name:
        lowerCAmelCase__ : Tuple = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **UpperCamelCase )
    elif model_args.model_name_or_path:
        lowerCAmelCase__ : List[Any] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **UpperCamelCase )
    else:
        lowerCAmelCase__ : Dict = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        lowerCAmelCase__ : List[Any] = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path ,
            from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) ,
            config=UpperCamelCase ,
            cache_dir=model_args.cache_dir ,
            revision=model_args.model_revision ,
            use_auth_token=True if model_args.use_auth_token else None , )
    else:
        logger.info('''Training new model from scratch''' )
        lowerCAmelCase__ : Any = ViTMAEForPreTraining(UpperCamelCase )

    if training_args.do_train:
        lowerCAmelCase__ : Union[str, Any] = ds['''train'''].column_names
    else:
        lowerCAmelCase__ : Dict = ds['''validation'''].column_names

    if data_args.image_column_name is not None:
        lowerCAmelCase__ : Union[str, Any] = data_args.image_column_name
    elif "image" in column_names:
        lowerCAmelCase__ : Dict = '''image'''
    elif "img" in column_names:
        lowerCAmelCase__ : Dict = '''img'''
    else:
        lowerCAmelCase__ : Optional[int] = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        lowerCAmelCase__ : int = image_processor.size['''shortest_edge''']
    else:
        lowerCAmelCase__ : Dict = (image_processor.size['''height'''], image_processor.size['''width'''])
    lowerCAmelCase__ : int = Compose(
        [
            Lambda(lambda UpperCamelCase : img.convert('''RGB''' ) if img.mode != "RGB" else img ),
            RandomResizedCrop(UpperCamelCase , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
        ] )

    def preprocess_images(UpperCamelCase ):
        lowerCAmelCase__ : Optional[Any] = [transforms(UpperCamelCase ) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError('''--do_train requires a train dataset''' )
        if data_args.max_train_samples is not None:
            lowerCAmelCase__ : str = ds['''train'''].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
        # Set the training transforms
        ds["train"].set_transform(UpperCamelCase )

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError('''--do_eval requires a validation dataset''' )
        if data_args.max_eval_samples is not None:
            lowerCAmelCase__ : str = (
                ds['''validation'''].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
            )
        # Set the validation transforms
        ds["validation"].set_transform(UpperCamelCase )

    # Compute absolute learning rate
    lowerCAmelCase__ : List[Any] = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        lowerCAmelCase__ : str = training_args.base_learning_rate * total_train_batch_size / 256

    # Initialize our trainer
    lowerCAmelCase__ : List[str] = Trainer(
        model=UpperCamelCase ,
        args=UpperCamelCase ,
        train_dataset=ds['''train'''] if training_args.do_train else None ,
        eval_dataset=ds['''validation'''] if training_args.do_eval else None ,
        tokenizer=UpperCamelCase ,
        data_collator=UpperCamelCase , )

    # Training
    if training_args.do_train:
        lowerCAmelCase__ : Tuple = None
        if training_args.resume_from_checkpoint is not None:
            lowerCAmelCase__ : int = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            lowerCAmelCase__ : str = last_checkpoint
        lowerCAmelCase__ : List[str] = trainer.train(resume_from_checkpoint=UpperCamelCase )
        trainer.save_model()
        trainer.log_metrics('''train''' , train_result.metrics )
        trainer.save_metrics('''train''' , train_result.metrics )
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        lowerCAmelCase__ : str = trainer.evaluate()
        trainer.log_metrics('''eval''' , UpperCamelCase )
        trainer.save_metrics('''eval''' , UpperCamelCase )

    # Write model card and (optionally) push to hub
    lowerCAmelCase__ : Union[str, Any] = {
        '''tasks''': '''masked-auto-encoding''',
        '''dataset''': data_args.dataset_name,
        '''tags''': ['''masked-auto-encoding'''],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**UpperCamelCase )
    else:
        trainer.create_model_card(**UpperCamelCase )


def __lowerCAmelCase ( UpperCamelCase ) -> Optional[Any]:
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
678
import unittest

from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:
    class _lowerCAmelCase :
        @staticmethod
        def __magic_name__( *__UpperCAmelCase , **__UpperCAmelCase ):
            pass


@is_pipeline_test
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
    @require_torch
    def __magic_name__( self ):
        lowerCAmelCase__ : int = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
        lowerCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCAmelCase__ : List[str] = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(__UpperCAmelCase ) ,
            [
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}],
                [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''c'''}, {'''score''': 0.333, '''label''': '''b'''}],
            ] , )
        lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__UpperCAmelCase ) ,
            [
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
            ] , )

    @require_tf
    def __magic_name__( self ):
        lowerCAmelCase__ : List[Any] = pipeline(
            model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''' )
        lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''a''', '''b''', '''c'''] )
        self.assertEqual(
            nested_simplify(__UpperCAmelCase ) ,
            [{'''score''': 0.333, '''label''': '''a'''}, {'''score''': 0.333, '''label''': '''b'''}, {'''score''': 0.333, '''label''': '''c'''}] , )
        lowerCAmelCase__ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__UpperCAmelCase ) ,
            [
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
                [{'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}, {'''score''': 0.333, '''label''': ANY(__UpperCAmelCase )}],
            ] , )

    @slow
    @require_torch
    def __magic_name__( self ):
        lowerCAmelCase__ : str = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
        # This is an image of 2 cats with remotes and no planes
        lowerCAmelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCAmelCase__ : str = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__UpperCAmelCase ) ,
            [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        lowerCAmelCase__ : Tuple = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__UpperCAmelCase ) ,
            [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ] * 5 , )

    @slow
    @require_tf
    def __magic_name__( self ):
        lowerCAmelCase__ : Union[str, Any] = pipeline(
            task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''' )
        # This is an image of 2 cats with remotes and no planes
        lowerCAmelCase__ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        lowerCAmelCase__ : Union[str, Any] = image_classifier(__UpperCAmelCase , candidate_labels=['''cat''', '''plane''', '''remote'''] )
        self.assertEqual(
            nested_simplify(__UpperCAmelCase ) ,
            [
                {'''score''': 0.511, '''label''': '''remote'''},
                {'''score''': 0.485, '''label''': '''cat'''},
                {'''score''': 0.004, '''label''': '''plane'''},
            ] , )
        lowerCAmelCase__ : Any = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2 )
        self.assertEqual(
            nested_simplify(__UpperCAmelCase ) ,
            [
                [
                    {'''score''': 0.511, '''label''': '''remote'''},
                    {'''score''': 0.485, '''label''': '''cat'''},
                    {'''score''': 0.004, '''label''': '''plane'''},
                ],
            ] * 5 , )
678
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
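The same deferred-import behaviour can be sketched without _LazyModule using PEP 562's module-level __getattr__; a minimal illustration of the pattern, not how _LazyModule is actually implemented:

import importlib

# Map of public attribute -> submodule that defines it (illustrative).
_LAZY_ATTRS = {"NllbTokenizer": ".tokenization_nllb"}

def __getattr__(name):
    # Import the submodule only on first attribute access.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")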
678
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph, s, t, parent):
    # Return True if the sink t is reachable from the source s,
    # recording the BFS tree in `parent` along the way.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Report every edge that was fully saturated by the flow
    # (positive capacity originally, zero residual now).
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res


if __name__ == "__main__":
    print(mincut(test_graph, source=0, sink=5))
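A tiny, self-contained check of the saturated-edge bookkeeping above; the two-node graph and its expected cut are illustrative assumptions, not part of the original file:

# A single edge 0 -> 1 with capacity 5 must itself be the cut
# separating source 0 from sink 1.
tiny_graph = [
    [0, 5],
    [0, 0],
]
assert mincut(tiny_graph, source=0, sink=1) == [(0, 1)]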
678
1
from argparse import ArgumentParser, Namespace from ..utils import logging from . import BaseTransformersCLICommand def __lowerCAmelCase ( UpperCamelCase ) -> Dict: return ConvertCommand( args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name ) lowerCAmelCase_ = """ transformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions. """ class _lowerCAmelCase ( _lowercase ): @staticmethod def __magic_name__( __UpperCAmelCase ): lowerCAmelCase__ : int = parser.add_parser( '''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , ) train_parser.add_argument('''--model_type''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Model\'s type.''' ) train_parser.add_argument( '''--tf_checkpoint''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''TensorFlow checkpoint path or folder.''' ) train_parser.add_argument( '''--pytorch_dump_output''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Path to the PyTorch saved model output.''' ) train_parser.add_argument('''--config''' , type=__UpperCAmelCase , default='''''' , help='''Configuration file path or folder.''' ) train_parser.add_argument( '''--finetuning_task_name''' , type=__UpperCAmelCase , default=__UpperCAmelCase , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , ) train_parser.set_defaults(func=__UpperCAmelCase ) def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , ): lowerCAmelCase__ : str = logging.get_logger('''transformers-cli/converting''' ) self._logger.info(f"""Loading model {model_type}""" ) lowerCAmelCase__ : Any = model_type lowerCAmelCase__ : Dict = tf_checkpoint lowerCAmelCase__ : Optional[Any] = pytorch_dump_output lowerCAmelCase__ : Tuple = config lowerCAmelCase__ : List[str] = finetuning_task_name def __magic_name__( self ): if self._model_type == "albert": try: from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "bert": try: from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "funnel": try: from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import ( convert_tf_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "t5": try: from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch except ImportError: raise ImportError(__UpperCAmelCase ) convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "gpt": from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import ( convert_openai_checkpoint_to_pytorch, ) 
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "transfo_xl": try: from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import ( convert_transfo_xl_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) if "ckpt" in self._tf_checkpoint.lower(): lowerCAmelCase__ : Dict = self._tf_checkpoint lowerCAmelCase__ : Any = '''''' else: lowerCAmelCase__ : List[Any] = self._tf_checkpoint lowerCAmelCase__ : Union[str, Any] = '''''' convert_transfo_xl_checkpoint_to_pytorch( __UpperCAmelCase , self._config , self._pytorch_dump_output , __UpperCAmelCase ) elif self._model_type == "gpt2": try: from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import ( convert_gpta_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) elif self._model_type == "xlnet": try: from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import ( convert_xlnet_checkpoint_to_pytorch, ) except ImportError: raise ImportError(__UpperCAmelCase ) convert_xlnet_checkpoint_to_pytorch( self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name ) elif self._model_type == "xlm": from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import ( convert_xlm_checkpoint_to_pytorch, ) convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "lxmert": from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import ( convert_lxmert_checkpoint_to_pytorch, ) convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output ) elif self._model_type == "rembert": from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import ( convert_rembert_tf_checkpoint_to_pytorch, ) convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output ) else: raise ValueError( '''--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]''' )
678
import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _lowerCAmelCase ( unittest.TestCase ): A__ = MODEL_FOR_CAUSAL_LM_MAPPING A__ = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def __magic_name__( self ): lowerCAmelCase__ : Tuple = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.''' ''' oscope. FiliFili@@''' ) } ], [ { '''generated_text''': ( '''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy''' ''' oscope. oscope. FiliFili@@''' ) } ], ] , ) lowerCAmelCase__ : str = text_generator('''This is a test''' , do_sample=__UpperCAmelCase , num_return_sequences=2 , return_tensors=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ] , ) lowerCAmelCase__ : List[Any] = text_generator.model.config.eos_token_id lowerCAmelCase__ : List[Any] = '''<pad>''' lowerCAmelCase__ : List[Any] = text_generator( ['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__UpperCAmelCase , ) self.assertEqual( __UpperCAmelCase , [ [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], [ {'''generated_token_ids''': ANY(__UpperCAmelCase )}, {'''generated_token_ids''': ANY(__UpperCAmelCase )}, ], ] , ) @require_tf def __magic_name__( self ): lowerCAmelCase__ : int = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' ) # Using `do_sample=False` to force deterministic output lowerCAmelCase__ : List[Any] = text_generator('''This is a test''' , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ] , ) lowerCAmelCase__ : List[str] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [ { '''generated_text''': ( '''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵''' ''' please,''' ) } ], [ { '''generated_text''': ( '''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes''' ''' Cannes 閲閲Cannes Cannes Cannes 攵 please,''' ) } ], ] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : Dict = TextGenerationPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase ) 
return text_generator, ["This is a test", "Another test"] def __magic_name__( self ): lowerCAmelCase__ : Any = '''Hello I believe in''' lowerCAmelCase__ : List[Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) lowerCAmelCase__ : Optional[int] = text_generator(__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , ) lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , stop_sequence=''' fe''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': '''Hello I believe in fe'''}] ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase ): lowerCAmelCase__ : str = text_generator.model lowerCAmelCase__ : Optional[int] = text_generator.tokenizer lowerCAmelCase__ : Tuple = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : Dict = pipeline(task='''text-generation''' , model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , return_full_text=__UpperCAmelCase ) lowerCAmelCase__ : Dict = text_generator('''This is a test''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] ) lowerCAmelCase__ : List[str] = text_generator('''This is a test''' , return_full_text=__UpperCAmelCase ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) ) lowerCAmelCase__ : Optional[int] = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) if text_generator.tokenizer.pad_token is not None: lowerCAmelCase__ : List[str] = text_generator( ['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=__UpperCAmelCase ) self.assertEqual( __UpperCAmelCase , [ [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], [{'''generated_text''': ANY(__UpperCAmelCase )}, {'''generated_text''': ANY(__UpperCAmelCase )}], ] , ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Any = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_text=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = text_generator('''test''' , return_full_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : str = text_generator('''test''' , return_text=__UpperCAmelCase , return_tensors=__UpperCAmelCase ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. 
if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): lowerCAmelCase__ : str = text_generator('''''' ) self.assertEqual(__UpperCAmelCase , [{'''generated_text''': ANY(__UpperCAmelCase )}] ) else: with self.assertRaises((ValueError, AssertionError) ): lowerCAmelCase__ : List[str] = text_generator('''''' ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. lowerCAmelCase__ : Optional[Any] = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM'''] if ( tokenizer.model_max_length < 1_0000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator('''This is a test''' * 500 , max_new_tokens=20 ) lowerCAmelCase__ : Optional[Any] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(__UpperCAmelCase ): text_generator( '''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch # Classic `model_kwargs` lowerCAmelCase__ : List[str] = pipeline( model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
lowerCAmelCase__ : Dict = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) lowerCAmelCase__ : Union[str, Any] = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 lowerCAmelCase__ : str = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) lowerCAmelCase__ : Any = pipe('''This is a test''' ) self.assertEqual( __UpperCAmelCase , [ { '''generated_text''': ( '''This is a test test test test test test test test test test test test test test test test''' ''' test''' ) } ] , ) @require_torch @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : List[str] = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.floataa ) pipe('''This is a test''' ) @require_torch @require_accelerate @require_torch_gpu def __magic_name__( self ): import torch lowerCAmelCase__ : Any = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.floataa ) pipe('''This is a test''' , do_sample=__UpperCAmelCase , top_p=0.5 ) def __magic_name__( self ): lowerCAmelCase__ : int = '''Hello world''' lowerCAmelCase__ : Union[str, Any] = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' ) if text_generator.model.framework == "tf": lowerCAmelCase__ : List[Any] = logging.get_logger('''transformers.generation.tf_utils''' ) else: lowerCAmelCase__ : Dict = logging.get_logger('''transformers.generation.utils''' ) lowerCAmelCase__ : Optional[Any] = '''Both `max_new_tokens`''' # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : List[str] = text_generator(__UpperCAmelCase , max_length=10 , max_new_tokens=1 ) self.assertIn(__UpperCAmelCase , cl.out ) # The user only sets one -> no warning with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Any = text_generator(__UpperCAmelCase , max_new_tokens=1 ) self.assertNotIn(__UpperCAmelCase , cl.out ) with CaptureLogger(__UpperCAmelCase ) as cl: lowerCAmelCase__ : Union[str, Any] = text_generator(__UpperCAmelCase , max_length=10 ) self.assertNotIn(__UpperCAmelCase , cl.out )
678
1
import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer lowerCAmelCase_ = ["""gpt2"""] lowerCAmelCase_ = """gpt2""" if is_tf_available(): class _lowerCAmelCase ( tf.Module ): def __init__( self , __UpperCAmelCase ): super().__init__() lowerCAmelCase__ : Optional[Any] = tokenizer lowerCAmelCase__ : Dict = AutoConfig.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = TFGPTaLMHeadModel.from_config(__UpperCAmelCase ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) ) def __magic_name__( self , __UpperCAmelCase ): lowerCAmelCase__ : int = self.tokenizer(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = tokenized['''input_ids'''].to_tensor() lowerCAmelCase__ : int = tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) lowerCAmelCase__ : Any = self.model(input_ids=__UpperCAmelCase , attention_mask=__UpperCAmelCase )['''logits'''] return outputs @require_tf @require_keras_nlp class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): super().setUp() lowerCAmelCase__ : Any = [GPTaTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)] lowerCAmelCase__ : Tuple = [TFGPTaTokenizer.from_pretrained(__UpperCAmelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowerCAmelCase__ : Optional[int] = [ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] lowerCAmelCase__ : Optional[int] = list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def __magic_name__( self ): for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: lowerCAmelCase__ : List[str] = tokenizer([test_inputs] , return_tensors='''tf''' ) lowerCAmelCase__ : Dict = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors lowerCAmelCase__ : int = python_outputs[key].numpy() lowerCAmelCase__ : str = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__UpperCAmelCase , tf.intaa ) == tf_outputs_values ) ) @slow def __magic_name__( self ): for tf_tokenizer in self.tf_tokenizers: lowerCAmelCase__ : Union[str, Any] = tf.function(__UpperCAmelCase ) for test_inputs in self.test_sentences: lowerCAmelCase__ : str = tf.constant(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = compiled_tokenizer(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = tf_tokenizer(__UpperCAmelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def __magic_name__( self ): for tf_tokenizer in self.tf_tokenizers: lowerCAmelCase__ : Optional[Any] = 
ModelToSave(tokenizer=__UpperCAmelCase ) lowerCAmelCase__ : int = tf.convert_to_tensor([self.test_sentences[0]] ) lowerCAmelCase__ : Union[str, Any] = model.serving(__UpperCAmelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowerCAmelCase__ : List[Any] = Path(__UpperCAmelCase ) / '''saved.model''' tf.saved_model.save(__UpperCAmelCase , __UpperCAmelCase , signatures={'''serving_default''': model.serving} ) lowerCAmelCase__ : int = tf.saved_model.load(__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = loaded_model.signatures['''serving_default'''](__UpperCAmelCase )['''output_0'''] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def __magic_name__( self ): for tf_tokenizer in self.tf_tokenizers: lowerCAmelCase__ : str = tf.convert_to_tensor([self.test_sentences[0]] ) lowerCAmelCase__ : List[str] = tf_tokenizer(__UpperCAmelCase ) # Build model with some sample inputs lowerCAmelCase__ : Dict = tf_tokenizer.get_config() lowerCAmelCase__ : int = TFGPTaTokenizer.from_config(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model_from_config(__UpperCAmelCase ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def __magic_name__( self ): for tf_tokenizer in self.tf_tokenizers: # for the test to run lowerCAmelCase__ : Any = 12_3123 for max_length in [3, 5, 1024]: lowerCAmelCase__ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] ) lowerCAmelCase__ : Dict = tf_tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase ) lowerCAmelCase__ : str = out['''input_ids'''].numpy().shape[1] assert out_length == max_length
678
def base16_encode(data: bytes) -> str:
    # Turn each byte into its two-digit uppercase hex representation.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid: Data does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
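A round-trip sketch of the two functions above (the sample input is illustrative):

# Encoding then decoding should return the original bytes.
encoded = base16_encode(b"HELLO")
assert encoded == "48454C4C4F"
assert base16_decode(encoded) == b"HELLO"
# Lowercase or odd-length input is rejected, per RFC 3548 section 6.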
678
1
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]


def create_vector(end_point1: Pointad, end_point2: Pointad) -> Vectorad:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_ad_vectors_cross(ab: Vectorad, ac: Vectorad) -> Vectorad:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vectorad, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Pointad, b: Pointad, c: Pointad, accuracy: int = 10) -> bool:
    # Three points are collinear iff the cross product AB x AC is
    # (numerically) the zero vector.
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_ad_vectors_cross(ab, ac), accuracy)
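Two quick checks of the collinearity test above (the sample points are illustrative, and are_collinear follows the definitions just given):

# Points on the main diagonal are collinear; standard basis points are not.
assert are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) is True
assert are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0)) is False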
678
import torch from diffusers import DPMSolverSDEScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import require_torchsde from .test_schedulers import SchedulerCommonTest @require_torchsde class _lowerCAmelCase ( _lowercase ): A__ = (DPMSolverSDEScheduler,) A__ = 10 def __magic_name__( self , **__UpperCAmelCase ): lowerCAmelCase__ : Dict = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', '''noise_sampler_seed''': 0, } config.update(**__UpperCAmelCase ) return config def __magic_name__( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def __magic_name__( self ): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def __magic_name__( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def __magic_name__( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = self.scheduler_classes[0] lowerCAmelCase__ : str = self.get_scheduler_config() lowerCAmelCase__ : Optional[Any] = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Union[str, Any] = self.dummy_model() lowerCAmelCase__ : Any = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Optional[Any] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = output.prev_sample lowerCAmelCase__ : Union[str, Any] = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.47821044921875 ) < 1e-2 assert abs(result_mean.item() - 0.2178705964565277 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59352111816406 ) < 1e-2 assert abs(result_mean.item() - 0.22342906892299652 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : Optional[Any] = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Tuple = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : int = output.prev_sample lowerCAmelCase__ : Any = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) 
) if torch_device in ["mps"]: assert abs(result_sum.item() - 124.77149200439453 ) < 1e-2 assert abs(result_mean.item() - 0.16226289014816284 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 128.1663360595703 ) < 1e-2 assert abs(result_mean.item() - 0.16688326001167297 ) < 1e-3 else: assert abs(result_sum.item() - 119.8487548828125 ) < 1e-2 assert abs(result_mean.item() - 0.1560530662536621 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[int] = self.scheduler_classes[0] lowerCAmelCase__ : Tuple = self.get_scheduler_config() lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : Dict = self.dummy_model() lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = output.prev_sample lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Dict = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 167.46957397460938 ) < 1e-2 assert abs(result_mean.item() - 0.21805934607982635 ) < 1e-3 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 171.59353637695312 ) < 1e-2 assert abs(result_mean.item() - 0.22342908382415771 ) < 1e-3 else: assert abs(result_sum.item() - 162.52383422851562 ) < 1e-2 assert abs(result_mean.item() - 0.211619570851326 ) < 1e-3 def __magic_name__( self ): lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0] lowerCAmelCase__ : Dict = self.get_scheduler_config() lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase , use_karras_sigmas=__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = self.dummy_model() lowerCAmelCase__ : int = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma lowerCAmelCase__ : Union[str, Any] = sample.to(__UpperCAmelCase ) for t in scheduler.timesteps: lowerCAmelCase__ : Optional[Any] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Tuple = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Dict = output.prev_sample lowerCAmelCase__ : int = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["mps"]: assert abs(result_sum.item() - 176.66974135742188 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 elif torch_device in ["cuda"]: assert abs(result_sum.item() - 177.63653564453125 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2 else: assert abs(result_sum.item() - 170.3135223388672 ) < 1e-2 assert abs(result_mean.item() - 0.23003872730981811 ) < 1e-2
678
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
    # See all Donut models at https://huggingface.co/models?filter=donut-swin
}


class DonutSwinConfig(PretrainedConfig):
    model_type = "donut-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
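A quick sanity check of the derived attribute; a sketch assuming an installed transformers that exposes DonutSwinConfig at the top level:

from transformers import DonutSwinConfig

# With the defaults above (embed_dim=96, four stages), the channel dimension
# after the last stage is 96 * 2**(4 - 1) = 768.
config = DonutSwinConfig()
assert config.hidden_size == 768
assert config.num_hidden_layers == 4  # resolved to num_layers via attribute_map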
678
import time
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device

from ..test_modeling_common import ids_tensor


if is_torch_available():
    import torch

    from transformers.generation import (
        MaxLengthCriteria,
        MaxNewTokensCriteria,
        MaxTimeCriteria,
        StoppingCriteriaList,
        validate_stopping_criteria,
    )


@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
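The contract these tests pin down is small enough to sketch in plain Python; a toy re-implementation of the pattern under test, with illustrative names rather than the transformers API:

# A criterion is a callable over (input_ids, scores); a collection of
# criteria stops generation as soon as any member fires.
class ToyMaxLength:
    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, input_ids, scores) -> bool:
        return input_ids.shape[-1] >= self.max_length


def should_stop(criteria, input_ids, scores) -> bool:
    return any(criterion(input_ids, scores) for criterion in criteria)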
678
1
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    LineByLineWithSOPTextDataset,
    TextDataset,
    TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
678
from functools import reduce

N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )


if __name__ == "__main__":
    print(f"{solution() = }")
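The reduce keeps the running product as a string of digits so it can feed back into int(); a small illustrative window makes that concrete:

from functools import reduce

# On the window "9989" the fold computes str(9 * 9 * 8 * 9) == "5832".
assert reduce(lambda x, y: str(int(x) * int(y)), "9989") == "5832"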
678
1
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __lowerCAmelCase ( ) -> List[str]: lowerCAmelCase__ : Optional[Any] = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg''' lowerCAmelCase__ : Tuple = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw ).convert('''RGB''' ) return image def __lowerCAmelCase ( UpperCamelCase ) -> Any: lowerCAmelCase__ : str = [] # fmt: off # vision encoder rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') ) rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') ) rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') ) rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') ) rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) ) rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") ) rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") ) # QFormer rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') ) rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', 
'''qformer.embeddings.layernorm.bias''') ) # fmt: on return rename_keys def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCAmelCase__ : List[Any] = dct.pop(UpperCamelCase ) lowerCAmelCase__ : Dict = val def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> List[str]: for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases lowerCAmelCase__ : Optional[Any] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" ) lowerCAmelCase__ : Dict = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" ) # next, set bias in the state dict lowerCAmelCase__ : Tuple = torch.cat((q_bias, torch.zeros_like(UpperCamelCase , requires_grad=UpperCamelCase ), v_bias) ) lowerCAmelCase__ : Optional[int] = qkv_bias def __lowerCAmelCase ( UpperCamelCase ) -> Optional[Any]: lowerCAmelCase__ : int = 364 if '''coco''' in model_name else 224 lowerCAmelCase__ : Optional[int] = InstructBlipVisionConfig(image_size=UpperCamelCase ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: lowerCAmelCase__ : Any = TaConfig.from_pretrained('''google/flan-t5-xl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowerCAmelCase__ : int = TaConfig.from_pretrained('''google/flan-t5-xxl''' , dense_act_fn='''gelu''' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: lowerCAmelCase__ : Optional[Any] = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' , vocab_size=32001 ).to_dict() elif "vicuna-13b" in model_name: lowerCAmelCase__ : Optional[Any] = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' , vocab_size=32001 ).to_dict() else: raise ValueError('''Model name not supported''' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 lowerCAmelCase__ : Optional[Any] = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict() lowerCAmelCase__ : int = InstructBlipConfig(vision_config=UpperCamelCase , text_config=UpperCamelCase , qformer_config=UpperCamelCase ) return config, image_size @torch.no_grad() def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase=None , UpperCamelCase=False ) -> Tuple: lowerCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''' , truncation_side='''left''' ) qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} ) if "t5" in model_name: lowerCAmelCase__ : Any = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' , truncation_side='''left''' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) lowerCAmelCase__ : Optional[Any] = LlamaTokenizerFast.from_pretrained( '''huggyllama/llama-7b''' , truncation_side='''left''' , bos_token='''</s>''' , unk_token='''</s>''' ) tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} ) lowerCAmelCase__ , lowerCAmelCase__ : str = get_blipa_config(UpperCamelCase ) lowerCAmelCase__ : int = InstructBlipForConditionalGeneration(UpperCamelCase ).eval() lowerCAmelCase__ : str = { '''instructblip-vicuna-7b''': 
('''blip2_vicuna_instruct''', '''vicuna7b'''), '''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''), '''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''), '''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''), } lowerCAmelCase__ , lowerCAmelCase__ : Dict = model_name_to_original[model_name] # load original model print('''Loading original model...''' ) lowerCAmelCase__ : Union[str, Any] = '''cuda:1''' if torch.cuda.is_available() else '''cpu''' lowerCAmelCase__ : Dict = '''cuda:2''' if torch.cuda.is_available() else '''cpu''' lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : str = load_model_and_preprocess( name=UpperCamelCase , model_type=UpperCamelCase , is_eval=UpperCamelCase , device=UpperCamelCase ) original_model.eval() print('''Done!''' ) # update state dict keys lowerCAmelCase__ : Union[str, Any] = original_model.state_dict() lowerCAmelCase__ : List[Any] = create_rename_keys(UpperCamelCase ) for src, dest in rename_keys: rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowerCAmelCase__ : List[str] = state_dict.pop(UpperCamelCase ) if key.startswith('''Qformer.bert''' ): lowerCAmelCase__ : Dict = key.replace('''Qformer.bert''' , '''qformer''' ) if "attention.self" in key: lowerCAmelCase__ : Optional[int] = key.replace('''self''' , '''attention''' ) if "llm_proj" in key: lowerCAmelCase__ : Optional[Any] = key.replace('''llm_proj''' , '''language_projection''' ) if "t5_proj" in key: lowerCAmelCase__ : List[Any] = key.replace('''t5_proj''' , '''language_projection''' ) if key.startswith('''llm_model''' ): lowerCAmelCase__ : Optional[Any] = key.replace('''llm_model''' , '''language_model''' ) if key.startswith('''t5''' ): lowerCAmelCase__ : Optional[int] = key.replace('''t5''' , '''language''' ) lowerCAmelCase__ : Tuple = val # read in qv biases read_in_q_v_bias(UpperCamelCase , UpperCamelCase ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) lowerCAmelCase__ : Dict = load_demo_image() lowerCAmelCase__ : Dict = '''What is unusual about this image?''' # create processor lowerCAmelCase__ : Optional[Any] = BlipImageProcessor( size={'''height''': image_size, '''width''': image_size} , image_mean=UpperCamelCase , image_std=UpperCamelCase ) lowerCAmelCase__ : Optional[int] = InstructBlipProcessor( image_processor=UpperCamelCase , tokenizer=UpperCamelCase , qformer_tokenizer=UpperCamelCase , ) lowerCAmelCase__ : Tuple = processor(images=UpperCamelCase , text=UpperCamelCase , return_tensors='''pt''' ).to(UpperCamelCase ) # make sure processor creates exact same pixel values lowerCAmelCase__ : Optional[int] = vis_processors['''eval'''](UpperCamelCase ).unsqueeze(0 ).to(UpperCamelCase ) lowerCAmelCase__ : str = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCamelCase ) original_model.to(UpperCamelCase ) hf_model.to(UpperCamelCase ) with torch.no_grad(): if "vicuna" in model_name: lowerCAmelCase__ : List[str] = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits lowerCAmelCase__ : Tuple = hf_model(**UpperCamelCase ).logits else: lowerCAmelCase__ : List[Any] = original_model( {'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits lowerCAmelCase__ : List[Any] = tokenizer('''\n''' , return_tensors='''pt''' 
).input_ids.to(UpperCamelCase ) lowerCAmelCase__ : str = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 ) lowerCAmelCase__ : Union[str, Any] = hf_model(**UpperCamelCase , labels=UpperCamelCase ).logits print('''First values of original logits:''' , original_logits[0, :3, :3] ) print('''First values of HF logits:''' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape lowerCAmelCase__ : Optional[int] = 1E-4 if '''vicuna''' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , UpperCamelCase , atol=UpperCamelCase ) print('''Looks ok!''' ) print('''Generating with original model...''' ) lowerCAmelCase__ : Optional[int] = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('''Generating with HF model...''' ) lowerCAmelCase__ : Any = hf_model.generate( **UpperCamelCase , do_sample=UpperCamelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? lowerCAmelCase__ : int = 2 print('''Original generation:''' , UpperCamelCase ) lowerCAmelCase__ : Tuple = processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = [text.strip() for text in output_text] print('''HF generation:''' , UpperCamelCase ) if pytorch_dump_folder_path is not None: processor.save_pretrained(UpperCamelCase ) hf_model.save_pretrained(UpperCamelCase ) if push_to_hub: processor.push_to_hub(F"""Salesforce/{model_name}""" ) hf_model.push_to_hub(F"""Salesforce/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() lowerCAmelCase_ = [ """instructblip-vicuna-7b""", """instructblip-vicuna-13b""", """instructblip-flan-t5-xl""", """instructblip-flan-t5-xxl""", ] parser.add_argument( """--model_name""", default="""instructblip-flan-t5-xl""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) lowerCAmelCase_ = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
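The q/v-bias packing performed by read_in_q_v_bias is easy to sanity-check in isolation; the shapes below are illustrative, not the real model's dimensions:

import torch

# BLIP-2's ViT stores separate q and v biases and no k bias, so the fused
# qkv bias is laid out as [q_bias | zeros | v_bias].
q_bias = torch.ones(4)
v_bias = torch.full((4,), 2.0)
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias), v_bias))
assert qkv_bias.shape == (12,)
assert torch.equal(qkv_bias[4:8], torch.zeros(4))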
import re import jax.numpy as jnp from flax.traverse_util import flatten_dict, unflatten_dict from jax.random import PRNGKey from ..utils import logging lowerCAmelCase_ = logging.get_logger(__name__) def __lowerCAmelCase ( UpperCamelCase ) -> List[str]: lowerCAmelCase__ : int = R'''\w+[.]\d+''' lowerCAmelCase__ : Tuple = re.findall(UpperCamelCase , UpperCamelCase ) for pat in pats: lowerCAmelCase__ : List[str] = key.replace(UpperCamelCase , '''_'''.join(pat.split('''.''' ) ) ) return key def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',) if ( any('''norm''' in str_ for str_ in pt_tuple_key ) and (pt_tuple_key[-1] == "bias") and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict) and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict) ): lowerCAmelCase__ : List[str] = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict: lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''scale''',) return renamed_pt_tuple_key, pt_tensor # embedding if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict: lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''embedding''',) return renamed_pt_tuple_key, pt_tensor # conv layer lowerCAmelCase__ : str = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4: lowerCAmelCase__ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight": lowerCAmelCase__ : str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowerCAmelCase__ : Any = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowerCAmelCase__ : List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=42 ) -> Any: # Step 1: Convert pytorch tensor to numpy lowerCAmelCase__ : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} # Step 2: Since the model is stateless, get random Flax params lowerCAmelCase__ : Tuple = flax_model.init_weights(PRNGKey(UpperCamelCase ) ) lowerCAmelCase__ : Any = flatten_dict(UpperCamelCase ) lowerCAmelCase__ : List[Any] = {} # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): lowerCAmelCase__ : str = rename_key(UpperCamelCase ) lowerCAmelCase__ : Union[str, Any] = tuple(renamed_pt_key.split('''.''' ) ) # Correctly rename weight parameters lowerCAmelCase__ , lowerCAmelCase__ : List[str] = rename_key_and_reshape_tensor(UpperCamelCase , UpperCamelCase , UpperCamelCase ) if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( F"""PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape """ F"""{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.""" ) # also add unexpected weight so that warning is thrown lowerCAmelCase__ : List[str] = jnp.asarray(UpperCamelCase ) return unflatten_dict(UpperCamelCase )
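# --- Shape sketch (not part of the original module) ---
# A small self-contained check of the layout assumption behind the conv-layer
# branch above: PyTorch stores convolution weights as (out, in, kH, kW), while
# Flax expects (kH, kW, in, out), hence the transpose(2, 3, 1, 0).
import numpy as np

pt_conv_weight = np.zeros((8, 3, 4, 4))    # (out_channels, in_channels, kH, kW)
flax_kernel = pt_conv_weight.transpose(2, 3, 1, 0)
assert flax_kernel.shape == (4, 4, 3, 8)   # (kH, kW, in_channels, out_channels)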
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging lowerCAmelCase_ = logging.get_logger(__name__) if is_vision_available(): import PIL class _lowerCAmelCase ( _lowercase ): A__ = ['pixel_values'] def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = size if size is not None else {'''shortest_edge''': 224} lowerCAmelCase__ : Optional[int] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowerCAmelCase__ : Optional[Any] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase , param_name='''crop_size''' ) lowerCAmelCase__ : List[Any] = do_resize lowerCAmelCase__ : int = size lowerCAmelCase__ : str = resample lowerCAmelCase__ : Tuple = do_center_crop lowerCAmelCase__ : Union[str, Any] = crop_size lowerCAmelCase__ : Dict = do_rescale lowerCAmelCase__ : Tuple = rescale_factor lowerCAmelCase__ : Union[str, Any] = do_normalize lowerCAmelCase__ : int = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCAmelCase__ : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD lowerCAmelCase__ : int = do_convert_rgb def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ): lowerCAmelCase__ : List[str] = get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) lowerCAmelCase__ : int = get_resize_output_image_size(__UpperCAmelCase , size=size['''shortest_edge'''] , default_to_square=__UpperCAmelCase ) return resize(__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): lowerCAmelCase__ : Union[str, Any] = get_size_dict(__UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(__UpperCAmelCase , size=(size['''height'''], size['''width''']) , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ): return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ): lowerCAmelCase__ : Optional[Any] = do_resize if do_resize is not None else self.do_resize lowerCAmelCase__ : Optional[int] = size if size is not None else self.size lowerCAmelCase__ : Optional[int] = get_size_dict(__UpperCAmelCase , param_name='''size''' , default_to_square=__UpperCAmelCase ) lowerCAmelCase__ : int = resample if resample is not None else self.resample lowerCAmelCase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCAmelCase__ : Tuple = crop_size if crop_size is not None else self.crop_size lowerCAmelCase__ : Union[str, Any] = get_size_dict(__UpperCAmelCase , param_name='''crop_size''' , default_to_square=__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCAmelCase__ : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCAmelCase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize lowerCAmelCase__ : Optional[Any] = image_mean if image_mean is not None else self.image_mean lowerCAmelCase__ : Optional[Any] = image_std if image_std is not None else self.image_std lowerCAmelCase__ : Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCAmelCase__ : Any = make_list_of_images(__UpperCAmelCase ) if not valid_images(__UpperCAmelCase ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCAmelCase__ : Optional[Any] = [convert_to_rgb(__UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
lowerCAmelCase__ : Union[str, Any] = [to_numpy_array(__UpperCAmelCase ) for image in images] if do_resize: lowerCAmelCase__ : Optional[int] = [self.resize(image=__UpperCAmelCase , size=__UpperCAmelCase , resample=__UpperCAmelCase ) for image in images] if do_center_crop: lowerCAmelCase__ : Dict = [self.center_crop(image=__UpperCAmelCase , size=__UpperCAmelCase ) for image in images] if do_rescale: lowerCAmelCase__ : Dict = [self.rescale(image=__UpperCAmelCase , scale=__UpperCAmelCase ) for image in images] if do_normalize: lowerCAmelCase__ : Optional[Any] = [self.normalize(image=__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase ) for image in images] lowerCAmelCase__ : Tuple = [to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images] lowerCAmelCase__ : Tuple = {'''pixel_values''': images} return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
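# --- Pipeline sketch (not part of the original class) ---
# A hedged, standalone illustration of the rescale -> normalize ordering that
# the preprocess method applies. The CLIP mean/std constants are written out
# explicitly so the snippet runs without the relative imports used above.
import numpy as np

clip_mean = np.array([0.48145466, 0.4578275, 0.40821073])
clip_std = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)
image = image * (1 / 255)               # do_rescale with rescale_factor=1/255
image = (image - clip_mean) / clip_std  # do_normalize with OPENAI_CLIP_MEAN/STD
print(image.shape, image.mean())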
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { """configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""", """NezhaForNextSentencePrediction""", """NezhaForMaskedLM""", """NezhaForPreTraining""", """NezhaForMultipleChoice""", """NezhaForQuestionAnswering""", """NezhaForSequenceClassification""", """NezhaForTokenClassification""", """NezhaModel""", """NezhaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nezha import ( NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, NezhaPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
def combination_sum_iv(n, array, target) -> int:
    # Naive recursion: count ordered sequences drawn from `array` summing to `target`.
    def count_of_possible_combinations(target) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target) -> int:
    # Same count, memoised through an explicit dp_array.
    def count_of_possible_combinations_with_dp_array(target, dp_array) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target) -> int:
    # Iterative bottom-up variant of the same count.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
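# --- Worked example (not part of the original file) ---
# For array = [1, 2, 5] and target = 5, order matters, so there are nine
# sequences: 1+1+1+1+1; 1+1+1+2 in four orders; 1+2+2 in three orders; and 5.
# All three implementations above should agree on this count.
assert combination_sum_iv(3, [1, 2, 5], 5) == 9
assert combination_sum_iv_bottom_up(3, [1, 2, 5], 5) == 9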
import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel lowerCAmelCase_ = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class _lowerCAmelCase ( unittest.TestCase ): @classmethod def __magic_name__( cls ): lowerCAmelCase__ : Dict = TOKEN HfFolder.save_token(__UpperCAmelCase ) @classmethod def __magic_name__( cls ): try: delete_repo(token=cls._token , repo_id='''test-model-flax''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' ) except HTTPError: pass def __magic_name__( self ): lowerCAmelCase__ : List[Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : List[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''test-model-flax''' , use_auth_token=self._token ) lowerCAmelCase__ : List[Any] = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : Union[str, Any] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[Any] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''test-model-flax''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(__UpperCAmelCase , repo_id='''test-model-flax''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(f"""{USER}/test-model-flax""" ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel(__UpperCAmelCase ) model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token ) lowerCAmelCase__ : str = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( __UpperCAmelCase , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__UpperCAmelCase , use_auth_token=self._token ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' ) 
lowerCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) ) lowerCAmelCase__ : List[str] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): lowerCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(__UpperCAmelCase , 1e-3 , msg=f"""{key} not identical""" ) def check_models_equal ( modela , modelb ) -> bool: lowerCAmelCase__ : Any = True lowerCAmelCase__ : Any = flatten_dict(modela.params ) lowerCAmelCase__ : List[str] = flatten_dict(modelb.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1E-4: lowerCAmelCase__ : Optional[Any] = False return models_are_equal @require_flax class _lowerCAmelCase ( unittest.TestCase ): def __magic_name__( self ): lowerCAmelCase__ : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : List[str] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : Tuple = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' ) lowerCAmelCase__ : Union[str, Any] = FlaxBertModel(__UpperCAmelCase ) lowerCAmelCase__ : Dict = '''bert''' with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , max_shard_size='''10KB''' ) with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Any = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertTrue(check_models_equal(__UpperCAmelCase , __UpperCAmelCase ) ) def __magic_name__( self ): lowerCAmelCase__ : List[str] = '''bert''' lowerCAmelCase__ : int = '''hf-internal-testing/tiny-random-bert-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Dict = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : List[Any] = '''bert''' lowerCAmelCase__ : Tuple = '''hf-internal-testing/tiny-random-bert-sharded-subfolder''' with self.assertRaises(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = FlaxBertModel.from_pretrained(__UpperCAmelCase , subfolder=__UpperCAmelCase ) self.assertIsNotNone(__UpperCAmelCase )
import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType lowerCAmelCase_ = logging.get_logger(__name__) class _lowerCAmelCase ( _lowercase ): A__ = 'vision-encoder-decoder' A__ = True def __init__( self , **__UpperCAmelCase ): super().__init__(**__UpperCAmelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f"""A configuraton of type {self.model_type} cannot be instantiated because """ f"""not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}""" ) lowerCAmelCase__ : Dict = kwargs.pop('''encoder''' ) lowerCAmelCase__ : Dict = encoder_config.pop('''model_type''' ) lowerCAmelCase__ : List[str] = kwargs.pop('''decoder''' ) lowerCAmelCase__ : Optional[Any] = decoder_config.pop('''model_type''' ) lowerCAmelCase__ : Dict = AutoConfig.for_model(__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ : List[Any] = AutoConfig.for_model(__UpperCAmelCase , **__UpperCAmelCase ) lowerCAmelCase__ : str = True @classmethod def __magic_name__( cls , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ): logger.info('''Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' ) lowerCAmelCase__ : int = True lowerCAmelCase__ : Dict = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Any = copy.deepcopy(self.__dict__ ) lowerCAmelCase__ : str = self.encoder.to_dict() lowerCAmelCase__ : Union[str, Any] = self.decoder.to_dict() lowerCAmelCase__ : Tuple = self.__class__.model_type return output class _lowerCAmelCase ( _lowercase ): A__ = version.parse('1.11' ) @property def __magic_name__( self ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __magic_name__( self ): return 1e-4 @property def __magic_name__( self ): return OrderedDict({'''last_hidden_state''': {0: '''batch''', 1: '''encoder_sequence'''}} ) class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): lowerCAmelCase__ : Tuple = OrderedDict() lowerCAmelCase__ : Any = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowerCAmelCase__ : Any = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''} lowerCAmelCase__ : str = {0: '''batch''', 1: '''encoder_sequence'''} return common_inputs def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ): import torch lowerCAmelCase__ : Optional[int] = OrderedDict() lowerCAmelCase__ : Any = super().generate_dummy_inputs( __UpperCAmelCase , batch_size=__UpperCAmelCase , seq_length=__UpperCAmelCase , is_pair=__UpperCAmelCase , framework=__UpperCAmelCase ) lowerCAmelCase__ , lowerCAmelCase__ : List[str] = dummy_input['''input_ids'''].shape lowerCAmelCase__ : Dict = (batch, encoder_sequence, self._config.encoder_hidden_size) lowerCAmelCase__ : Union[str, Any] = dummy_input.pop('''input_ids''' ) lowerCAmelCase__ : Tuple = dummy_input.pop('''attention_mask''' ) lowerCAmelCase__ : Dict = torch.zeros(__UpperCAmelCase ) return common_inputs class _lowerCAmelCase ( _lowercase ): @property def __magic_name__( self ): pass def 
__magic_name__( self , __UpperCAmelCase ): return VisionEncoderDecoderEncoderOnnxConfig(__UpperCAmelCase ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = "default" ): lowerCAmelCase__ : Tuple = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(__UpperCAmelCase , __UpperCAmelCase )
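# --- Usage sketch (not part of the original module) ---
# A hedged example of building the composite config through the
# from_encoder_decoder_configs classmethod defined above; ViT and GPT-2 are
# just illustrative sub-config choices.
from transformers import GPT2Config, ViTConfig, VisionEncoderDecoderConfig

encoder_config = ViTConfig()
decoder_config = GPT2Config()
config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(encoder_config, decoder_config)
assert config.decoder.is_decoder and config.decoder.add_cross_attention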
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("""The array is""")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    """No of Comparisons for 100 elements selected from a standard normal distribution """
    """is :"""
)
print(z)
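# --- Small usage sketch (not part of the original file) ---
# Sorting a short plain list and reading back the comparison count; the exact
# count varies run to run because the pivots are chosen at random.
small = [3, 1, 4, 1, 5, 9, 2, 6]
comparisons = _in_place_quick_sort(small, 0, len(small) - 1)
assert small == [1, 1, 2, 3, 4, 5, 6, 9]
print("comparisons on 8 elements:", comparisons)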
import torch from diffusers import KDPMaDiscreteScheduler from diffusers.utils import torch_device from .test_schedulers import SchedulerCommonTest class _lowerCAmelCase ( _lowercase ): A__ = (KDPMaDiscreteScheduler,) A__ = 10 def __magic_name__( self , **__UpperCAmelCase ): lowerCAmelCase__ : str = { '''num_train_timesteps''': 1100, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**__UpperCAmelCase ) return config def __magic_name__( self ): for timesteps in [10, 50, 100, 1000]: self.check_over_configs(num_train_timesteps=__UpperCAmelCase ) def __magic_name__( self ): for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ): self.check_over_configs(beta_start=__UpperCAmelCase , beta_end=__UpperCAmelCase ) def __magic_name__( self ): for schedule in ["linear", "scaled_linear"]: self.check_over_configs(beta_schedule=__UpperCAmelCase ) def __magic_name__( self ): for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=__UpperCAmelCase ) def __magic_name__( self ): lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : Optional[int] = self.get_scheduler_config(prediction_type='''v_prediction''' ) lowerCAmelCase__ : Optional[int] = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : str = self.dummy_model() lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : Optional[int] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Dict = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Dict = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : str = output.prev_sample lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 4.6934e-07 ) < 1e-2 assert abs(result_mean.item() - 6.1112e-10 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 4.693428650170972e-07 ) < 1e-2 assert abs(result_mean.item() - 0.0002 ) < 1e-3 def __magic_name__( self ): if torch_device == "mps": return lowerCAmelCase__ : Any = self.scheduler_classes[0] lowerCAmelCase__ : str = self.get_scheduler_config() lowerCAmelCase__ : Tuple = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps ) lowerCAmelCase__ : List[Any] = self.dummy_model() lowerCAmelCase__ : int = self.dummy_sample_deter * scheduler.init_noise_sigma lowerCAmelCase__ : List[Any] = sample.to(__UpperCAmelCase ) for i, t in enumerate(scheduler.timesteps ): lowerCAmelCase__ : Tuple = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : Any = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : str = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : str = output.prev_sample lowerCAmelCase__ : Dict = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : int = torch.mean(torch.abs(__UpperCAmelCase ) ) if torch_device in ["cpu", "mps"]: assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 def __magic_name__( self ): if 
torch_device == "mps": return lowerCAmelCase__ : Dict = self.scheduler_classes[0] lowerCAmelCase__ : List[str] = self.get_scheduler_config() lowerCAmelCase__ : int = scheduler_class(**__UpperCAmelCase ) scheduler.set_timesteps(self.num_inference_steps , device=__UpperCAmelCase ) lowerCAmelCase__ : Union[str, Any] = self.dummy_model() lowerCAmelCase__ : str = self.dummy_sample_deter.to(__UpperCAmelCase ) * scheduler.init_noise_sigma for t in scheduler.timesteps: lowerCAmelCase__ : List[str] = scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = model(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[Any] = scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = output.prev_sample lowerCAmelCase__ : str = torch.sum(torch.abs(__UpperCAmelCase ) ) lowerCAmelCase__ : List[Any] = torch.mean(torch.abs(__UpperCAmelCase ) ) if str(__UpperCAmelCase ).startswith('''cpu''' ): # The following sum varies between 148 and 156 on mps. Why? assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3 else: # CUDA assert abs(result_sum.item() - 20.4125 ) < 1e-2 assert abs(result_mean.item() - 0.0266 ) < 1e-3
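# --- Minimal loop sketch (not part of the original tests) ---
# The sampling pattern the tests above exercise, written out standalone. The
# scheduler ships in diffusers as KDPM2DiscreteScheduler; a real pipeline would
# replace the zero "prediction" below with a trained UNet's output.
import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02)
scheduler.set_timesteps(10)

sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a model prediction
    sample = scheduler.step(noise_pred, t, sample).prev_sample
print(sample.abs().mean())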
import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Optional[int]: assert isinstance(UpperCamelCase , UpperCamelCase ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : List[str] = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : List[Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> List[Any]: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Union[str, Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : str = features.copy() if features else default_expected_features lowerCAmelCase__ : List[Any] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: lowerCAmelCase__ : str = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase , split=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) assert dataset.split == split if split else "train" @pytest.mark.parametrize('''path_type''' , [str, list] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> str: if issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = parquet_path elif issubclass(UpperCamelCase , UpperCamelCase ): lowerCAmelCase__ : Any = [parquet_path] lowerCAmelCase__ : int = tmp_path / '''cache''' lowerCAmelCase__ : str = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Union[str, Any] = ParquetDatasetReader(UpperCamelCase , 
cache_dir=UpperCamelCase ).read() _check_parquet_dataset(UpperCamelCase , UpperCamelCase ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=("train",) ) -> str: assert isinstance(UpperCamelCase , UpperCamelCase ) for split in splits: lowerCAmelCase__ : str = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize('''keep_in_memory''' , [False, True] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[int]: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Optional[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): lowerCAmelCase__ : Optional[Any] = ParquetDatasetReader( {'''train''': parquet_path} , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize( '''features''' , [ None, {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}, {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''}, {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''}, {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''}, ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int: lowerCAmelCase__ : Any = tmp_path / '''cache''' lowerCAmelCase__ : Tuple = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : Tuple = features.copy() if features else default_expected_features lowerCAmelCase__ : Optional[int] = ( Features({feature: Value(UpperCamelCase ) for feature, dtype in features.items()} ) if features is not None else None ) lowerCAmelCase__ : List[str] = ParquetDatasetReader({'''train''': parquet_path} , features=UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase ) @pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Dict: if split: lowerCAmelCase__ : Tuple = {split: parquet_path} else: lowerCAmelCase__ : int = '''train''' lowerCAmelCase__ : List[Any] = {'''train''': parquet_path, '''test''': parquet_path} lowerCAmelCase__ : Optional[int] = tmp_path / '''cache''' lowerCAmelCase__ : List[Any] = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''} lowerCAmelCase__ : List[str] = ParquetDatasetReader(UpperCamelCase , cache_dir=UpperCamelCase ).read() _check_parquet_datasetdict(UpperCamelCase , UpperCamelCase , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : Optional[Any] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Union[str, Any] = pq.ParquetFile(tmp_path / '''foo.parquet''' ) lowerCAmelCase__ : int = pf.read() assert dataset.data.table == output_table def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: lowerCAmelCase__ : List[str] = str(shared_datadir / '''test_image_rgb.jpg''' ) 
lowerCAmelCase__ : Dict = {'''image''': [image_path]} lowerCAmelCase__ : int = Features({'''image''': Image()} ) lowerCAmelCase__ : Dict = Dataset.from_dict(UpperCamelCase , features=UpperCamelCase ) lowerCAmelCase__ : List[str] = ParquetDatasetWriter(UpperCamelCase , tmp_path / '''foo.parquet''' ) assert writer.write() > 0 lowerCAmelCase__ : Dict = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) ) assert dataset.features == reloaded_dataset.features lowerCAmelCase__ : int = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=UpperCamelCase ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( '''feature, expected''' , [ (Features({'''foo''': Value('''int32''' )} ), None), (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Any: assert get_writer_batch_size(UpperCamelCase ) == expected
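# --- Round-trip sketch (not part of the original tests) ---
# The public datasets API built on the same writer/reader exercised above:
# Dataset.to_parquet and Dataset.from_parquet. The file name is arbitrary.
from datasets import Dataset

ds = Dataset.from_dict(
    {"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4], "col_3": [1.0, 2.0, 3.0, 4.0]}
)
ds.to_parquet("example.parquet")
reloaded = Dataset.from_parquet("example.parquet")
assert reloaded.column_names == ["col_1", "col_2", "col_3"]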
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ = { """configuration_poolformer""": [ """POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PoolFormerConfig""", """PoolFormerOnnxConfig""", ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ["""PoolFormerFeatureExtractor"""] lowerCAmelCase_ = ["""PoolFormerImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ """POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """PoolFormerForImageClassification""", """PoolFormerModel""", """PoolFormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_poolformer import ( POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, PoolFormerConfig, PoolFormerOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_poolformer import PoolFormerFeatureExtractor from .image_processing_poolformer import PoolFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_poolformer import ( POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, PoolFormerForImageClassification, PoolFormerModel, PoolFormerPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""", } class _lowerCAmelCase ( _lowercase , _lowercase ): A__ = 'focalnet' def __init__( self , __UpperCAmelCase=224 , __UpperCAmelCase=4 , __UpperCAmelCase=3 , __UpperCAmelCase=96 , __UpperCAmelCase=False , __UpperCAmelCase=[192, 384, 768, 768] , __UpperCAmelCase=[2, 2, 6, 2] , __UpperCAmelCase=[2, 2, 2, 2] , __UpperCAmelCase=[3, 3, 3, 3] , __UpperCAmelCase="gelu" , __UpperCAmelCase=4.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase=False , __UpperCAmelCase=1e-4 , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=0.02 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=32 , __UpperCAmelCase=None , __UpperCAmelCase=None , **__UpperCAmelCase , ): super().__init__(**__UpperCAmelCase ) lowerCAmelCase__ : Any = image_size lowerCAmelCase__ : Any = patch_size lowerCAmelCase__ : Tuple = num_channels lowerCAmelCase__ : Dict = embed_dim lowerCAmelCase__ : Optional[int] = use_conv_embed lowerCAmelCase__ : Optional[int] = hidden_sizes lowerCAmelCase__ : Optional[Any] = depths lowerCAmelCase__ : Dict = focal_levels lowerCAmelCase__ : int = focal_windows lowerCAmelCase__ : Optional[Any] = hidden_act lowerCAmelCase__ : Optional[int] = mlp_ratio lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob lowerCAmelCase__ : List[Any] = drop_path_rate lowerCAmelCase__ : Tuple = use_layerscale lowerCAmelCase__ : List[Any] = layerscale_value lowerCAmelCase__ : Dict = use_post_layernorm lowerCAmelCase__ : Dict = use_post_layernorm_in_modulation lowerCAmelCase__ : Dict = normalize_modulator lowerCAmelCase__ : Union[str, Any] = initializer_range lowerCAmelCase__ : List[str] = layer_norm_eps lowerCAmelCase__ : Tuple = encoder_stride lowerCAmelCase__ : Dict = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )] lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_aligned_output_features_output_indices( out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
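# --- Usage sketch (not part of the original module) ---
# Instantiating the config through the public transformers API; the overrides
# below are illustrative rather than recommended values.
from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2])
model = FocalNetModel(config)
print(f"parameters: {sum(p.numel() for p in model.parameters()):,}")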
from __future__ import annotations


def __lowerCAmelCase(matrix) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
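# --- Worked example (not part of the original file) ---
# For the grid below, the cheapest monotone (right/down) path is
# 1 -> 3 -> 1 -> 1 -> 1, total cost 7. The function keeps its obfuscated name
# in this dump, so it is called by that name here; note it mutates its input.
grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
assert __lowerCAmelCase(grid) == 7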
from scipy.stats import pearsonr import datasets lowerCAmelCase_ = """ Pearson correlation coefficient and p-value for testing non-correlation. The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases. The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. """ lowerCAmelCase_ = """ Args: predictions (`list` of `int`): Predicted class labels, as returned by a model. references (`list` of `int`): Ground truth labels. return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`. Returns: pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation. p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities. Examples: Example 1-A simple example using only predictions and references. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5]) >>> print(round(results['pearsonr'], 2)) -0.74 Example 2-The same as Example 1, but also returning the `p-value`. >>> pearsonr_metric = datasets.load_metric(\"pearsonr\") >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True) >>> print(sorted(list(results.keys()))) ['p-value', 'pearsonr'] >>> print(round(results['pearsonr'], 2)) -0.74 >>> print(round(results['p-value'], 2)) 0.15 """ lowerCAmelCase_ = """ @article{2020SciPy-NMeth, author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and {van der Walt}, St{\'e}fan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, Ilhan and Feng, Yu and Moore, Eric W. and {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antonio H.
and Pedregosa, Fabian and {van Mulbregt}, Paul and {SciPy 1.0 Contributors}}, title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}}, journal = {Nature Methods}, year = {2020}, volume = {17}, pages = {261--272}, adsurl = {https://rdcu.be/b08Wh}, doi = {10.1038/s41592-019-0686-2}, } """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowerCAmelCase ( datasets.Metric ): def __magic_name__( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __magic_name__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=False ): if return_pvalue: lowerCAmelCase__ : Union[str, Any] = pearsonr(__UpperCAmelCase , __UpperCAmelCase ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(__UpperCAmelCase , __UpperCAmelCase )[0] )}
from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def __lowerCAmelCase ( UpperCamelCase ) -> int: lowerCAmelCase__ : int = prime_factors(UpperCamelCase ) if is_square_free(UpperCamelCase ): return -1 if len(UpperCamelCase ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
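# --- Worked examples (not part of the original file) ---
# The function above computes the Moebius function mu(n); tracing it by hand
# (it keeps its obfuscated name in this dump):
#   n = 4  -> prime_factors [2, 2], not square-free           -> 0
#   n = 6  -> prime_factors [2, 3], square-free, even count   -> 1
#   n = 30 -> prime_factors [2, 3, 5], square-free, odd count -> -1
assert __lowerCAmelCase(4) == 0
assert __lowerCAmelCase(6) == 1
assert __lowerCAmelCase(30) == -1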
from manim import * class _lowerCAmelCase ( _lowercase ): def __magic_name__( self ): lowerCAmelCase__ : Tuple = Rectangle(height=0.5 , width=0.5 ) lowerCAmelCase__ : Dict = Rectangle(height=0.25 , width=0.25 ) lowerCAmelCase__ : Dict = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) lowerCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : int = Text('''CPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) cpu.move_to([-2.5, -0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Optional[int] = [mem.copy() for i in range(4 )] lowerCAmelCase__ : Tuple = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''GPU''' , font_size=24 ) lowerCAmelCase__ : int = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) gpu.move_to([-1, -1, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : int = [mem.copy() for i in range(6 )] lowerCAmelCase__ : List[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Tuple = Text('''Model''' , font_size=24 ) lowerCAmelCase__ : List[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) model.move_to([3, -1.0, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : List[str] = [] lowerCAmelCase__ : Optional[Any] = [] for i, rect in enumerate(__UpperCAmelCase ): rect.set_stroke(__UpperCAmelCase ) lowerCAmelCase__ : Any = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__UpperCAmelCase , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__UpperCAmelCase ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=__UpperCAmelCase , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=__UpperCAmelCase , buff=0.0 ) self.add(__UpperCAmelCase ) model_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : Any = [mem.copy() for i in range(6 )] lowerCAmelCase__ : Optional[Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Any = Text('''Loaded Checkpoint''' , font_size=24 ) lowerCAmelCase__ : Optional[Any] = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) checkpoint.move_to([3, 0.5, 0] ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : Dict = [] lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Union[str, Any] = fill.copy().set_fill(__UpperCAmelCase , opacity=0.7 ) target.move_to(__UpperCAmelCase ) ckpt_arr.append(__UpperCAmelCase ) lowerCAmelCase__ : List[str] = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(__UpperCAmelCase ) self.add(*__UpperCAmelCase , *__UpperCAmelCase ) lowerCAmelCase__ : 
Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) lowerCAmelCase__ : List[Any] = MarkupText( f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(__UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ : List[str] = MarkupText( f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(__UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(__UpperCAmelCase ) lowerCAmelCase__ : str = MarkupText( f"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) lowerCAmelCase__ : Optional[Any] = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Dict = [meta_mem.copy() for i in range(6 )] lowerCAmelCase__ : Union[str, Any] = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : Dict = VGroup(*__UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : str = VGroup(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0 ) lowerCAmelCase__ : List[str] = Text('''Disk''' , font_size=24 ) lowerCAmelCase__ : Any = Group(__UpperCAmelCase , __UpperCAmelCase ).arrange(__UpperCAmelCase , buff=0.5 , aligned_edge=__UpperCAmelCase ) disk.move_to([-4.0, -1.25, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) , Write(__UpperCAmelCase , run_time=1 ) , Create(__UpperCAmelCase , run_time=1 ) ) lowerCAmelCase__ : str = [] for i, rect in enumerate(__UpperCAmelCase ): lowerCAmelCase__ : Dict = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(__UpperCAmelCase , run_time=1.5 ) ) self.play(*__UpperCAmelCase ) self.play(FadeOut(__UpperCAmelCase ) ) lowerCAmelCase__ : int = MarkupText(f"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(__UpperCAmelCase , run_time=3 ) ) self.play( FadeOut(__UpperCAmelCase , __UpperCAmelCase , *__UpperCAmelCase , *__UpperCAmelCase ) , ) self.wait()
import argparse import json import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType from accelerate.utils.deepspeed import DummyOptim, DummyScheduler lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase = 16 , UpperCamelCase = "bert-base-cased" ) -> Union[str, Any]: lowerCAmelCase__ : Any = AutoTokenizer.from_pretrained(UpperCamelCase ) lowerCAmelCase__ : List[Any] = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(UpperCamelCase ): # max_length=None => use the model max length (it's actually the default) lowerCAmelCase__ : Tuple = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase , max_length=UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset lowerCAmelCase__ : List[Any] = datasets.map( UpperCamelCase , batched=UpperCamelCase , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , load_from_cache_file=UpperCamelCase ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowerCAmelCase__ : Any = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(UpperCamelCase ): # On TPU it's best to pad everything to the same length or training will be very slow. if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(UpperCamelCase , padding='''max_length''' , max_length=128 , return_tensors='''pt''' ) return tokenizer.pad(UpperCamelCase , padding='''longest''' , return_tensors='''pt''' ) # Instantiate dataloaders. 
lowerCAmelCase__ : int = DataLoader( tokenized_datasets['''train'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase ) lowerCAmelCase__ : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=UpperCamelCase , collate_fn=UpperCamelCase , batch_size=UpperCamelCase ) return train_dataloader, eval_dataloader def __lowerCAmelCase ( UpperCamelCase , UpperCamelCase ) -> Tuple: # Initialize accelerator lowerCAmelCase__ : Tuple = Accelerator() # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowerCAmelCase__ : Optional[Any] = config['''lr'''] lowerCAmelCase__ : Dict = int(config['''num_epochs'''] ) lowerCAmelCase__ : str = int(config['''seed'''] ) lowerCAmelCase__ : Optional[int] = int(config['''batch_size'''] ) lowerCAmelCase__ : int = args.model_name_or_path set_seed(UpperCamelCase ) lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_dataloaders(UpperCamelCase , UpperCamelCase , UpperCamelCase ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowerCAmelCase__ : List[Any] = AutoModelForSequenceClassification.from_pretrained(UpperCamelCase , return_dict=UpperCamelCase ) # Instantiate optimizer lowerCAmelCase__ : Any = ( AdamW if accelerator.state.deepspeed_plugin is None or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config else DummyOptim ) lowerCAmelCase__ : Union[str, Any] = optimizer_cls(params=model.parameters() , lr=UpperCamelCase ) if accelerator.state.deepspeed_plugin is not None: lowerCAmelCase__ : List[Any] = accelerator.state.deepspeed_plugin.deepspeed_config[ '''gradient_accumulation_steps''' ] else: lowerCAmelCase__ : int = 1 lowerCAmelCase__ : Any = (len(UpperCamelCase ) * num_epochs) // gradient_accumulation_steps # Instantiate scheduler if ( accelerator.state.deepspeed_plugin is None or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config ): lowerCAmelCase__ : Optional[Any] = get_linear_schedule_with_warmup( optimizer=UpperCamelCase , num_warmup_steps=0 , num_training_steps=UpperCamelCase , ) else: lowerCAmelCase__ : Any = DummyScheduler(UpperCamelCase , total_num_steps=UpperCamelCase , warmup_num_steps=0 ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather((predictions, batch["labels"]))
            # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]

        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f"Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
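# Example of the kind of content `all_results.json` ends up holding after a run
# (the accuracy numbers here are made up for illustration):
#   {"epoch-0": 0.83, "epoch-1": 0.86, "epoch-2": 0.88}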
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--performance_lower_bound",
        type=float,
        default=None,
        help="Optional lower bound for the performance metric. If set, the training will throw an error when the performance metric drops below this value.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=3,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
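# Usage sketch (the script and config file names below are assumptions, not part of this file):
#   accelerate launch --config_file deepspeed_config.yaml test_performance.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 \
#       --output_dir ./outputs --performance_lower_bound 0.80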
import collections
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
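# Illustrative sketch of `find_backend` (these input lines are hypothetical init lines):
#   find_backend("    if not is_torch_available():")
#       -> "torch"
#   find_backend("    if not is_tf_available() and not is_torch_available():")
#       -> "tf_and_torch"   (backends are sorted and joined with "_and_")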
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
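    # Sketch of the shape both mappings take by the end of parsing (the object names
    # here are hypothetical): {"none": [...], "torch": ["BertModel"], "tf": [...]}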
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Check all inits in the repo define the same objects in both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
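# Example of a (hypothetical) failure raised by check_all_inits():
#   Problem in src/transformers/models/foo/__init__.py, both halves do not define the same objects.
#   Differences for torch backend:
#     FooModel in _import_structure but not in TYPE_HINT.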
def get_transformers_submodules():
    """Get the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
    "models.esm.openfold_utils",
]


def check_submodules():
    """Check all submodules are registered in the main init of Transformers."""
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
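# Usage sketch: this consistency check is assumed to live at utils/check_inits.py in the
# transformers repo and to be run from the repository root (so `src/transformers` resolves):
#   python utils/check_inits.py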