import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extractor specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()

    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )

    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_4))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )

    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
        )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            return_tensors="np",
            truncation=True,
        )
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
        )
        input_4 = input_4[input_name]

        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))

        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[1]),
            truncation=True,
            return_tensors="np",
        )
        input_5 = input_5[input_name]

        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
        )
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
        )
        input_7 = input_7[input_name]

        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))

        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
            truncation=True,
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            max_length=len(speech_inputs[0]),
            pad_to_multiple_of=pad_to_multiple_of,
        )
        input_9 = input_9[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))

    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)

    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
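# A concrete test module would pair this mixin with a tester object roughly as
# follows (sketch only; the names below are illustrative, not part of this file):
#
#     class MyFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
#         feature_extraction_class = MyFeatureExtractor
#
#         def setUp(self):
#             self.feat_extract_tester = MyFeatureExtractionTester(self)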
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Create a constant schedule preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule whose learning-rate multiplier follows piecewise constant rules."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
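# Reading of the rule format above (illustrative): a string like
# "1:10,0.1:20,0.05" keeps the multiplier at 1.0 before step 10, at 0.1 before
# step 20, and at 0.05 from then on; the final comma-separated entry is the
# terminal multiplier.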
def get_linear_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, last_epoch: int = -1
) -> LambdaLR:
    """Create a schedule with a linear warmup followed by a linear decay to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
) -> LambdaLR:
    """Create a schedule with a linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    """Create a schedule with a linear warmup followed by a cosine decay with hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer: Optimizer,
    num_warmup_steps: int,
    num_training_steps: int,
    lr_end: float = 1e-7,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Create a schedule with a linear warmup followed by a polynomial decay from the initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Unified entry point that dispatches to the schedule named by `name`."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
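if __name__ == "__main__":
    # Minimal usage sketch (illustrative addition, not part of the original
    # module): wire one of the schedules above to a toy optimizer.
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.AdamW(params, lr=1e-3)
    lr_scheduler = get_scheduler("cosine", optimizer, num_warmup_steps=10, num_training_steps=100)

    for _ in range(100):
        optimizer.step()
        lr_scheduler.step()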
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
'''configuration_upernet''': ['''UperNetConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'''UperNetForSemanticSegmentation''',
'''UperNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
a = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
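# `_LazyModule` defers the imports declared in `_import_structure` until one of
# the exported names is first accessed, which keeps importing the package cheap
# even when optional backends such as torch are installed.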
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    """Tokenize one dataset row and record its characters-per-token ratio."""
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
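# Example invocation (repo names and paths are placeholders, not taken from
# the original script):
#
#   python pretokenizing.py \
#       --tokenizer_dir codeparrot/codeparrot \
#       --dataset_name codeparrot/codeparrot-clean-train \
#       --tokenized_data_repo tokenized-codeparrot-train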
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
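# Usage sketch (illustrative; the checkpoint id and custom pipeline name are
# assumptions, not taken from this file):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#     ).to("cuda")
#     image = pipe(prompt="a fantasy landscape", height=640, width=640).images[0]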
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
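# Example invocation (script name and paths are placeholders, not taken from
# this file):
#
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-ctc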
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class KarrasDiffusionSchedulers(Enum):
    DDIMScheduler = 1
    DDPMScheduler = 2
    PNDMScheduler = 3
    LMSDiscreteScheduler = 4
    EulerDiscreteScheduler = 5
    EulerAncestralDiscreteScheduler = 6
    DPMSolverMultistepScheduler = 7
    HeunDiscreteScheduler = 8
    DPMSolverSinglestepScheduler = 9
    KDPM2DiscreteScheduler = 10
    KDPM2AncestralDiscreteScheduler = 11
    DEISMultistepScheduler = 12
    UniPCMultistepScheduler = 13
    DPMSolverSDEScheduler = 14


@dataclass
class SchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor


class SchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, os.PathLike]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs, commit_hash = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            return_commit_hash=True,
            **kwargs,
        )
        return cls.from_config(config, return_unused_kwargs=return_unused_kwargs, **kwargs)

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
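# Typical use (illustrative): every concrete scheduler inherits this mixin, so
# a scheduler can be loaded straight from a pipeline repository subfolder, e.g.
#
#     from diffusers import DDIMScheduler
#
#     scheduler = DDIMScheduler.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="scheduler")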
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
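# Usage sketch (illustrative; not part of the original module). These pipelines
# are normally constructed through `transformers.pipeline`:
#
#     from transformers import pipeline
#
#     summarizer = pipeline("summarization")
#     summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)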
from math import factorial, radians


def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate the sine of an angle given in degrees with a truncated Maclaurin series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians
    a = 3
    b = -1

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)

        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
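    # Quick sanity checks (illustrative additions, not in the original file):
    # the truncated Maclaurin series should reproduce familiar values.
    print(sin(90.0))  # expected: 1.0
    print(sin(30.0))  # expected: 0.5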
# A one-line quine: the format string is interpolated into itself via %r, so
# running this statement prints its own source.
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
def capitalize_variants(txt: str) -> list[str]:
    """
    Return one copy of `txt` per alphabetic character, with that character
    upper-cased. (The original function name was lost; this one is descriptive.)

    >>> capitalize_variants("abc")
    ['Abc', 'aBc', 'abC']
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("doctest").testmod()
'''simple docstring'''
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_SCREAMING_SNAKE_CASE = "src/transformers"
_SCREAMING_SNAKE_CASE = "docs/source/en"
_SCREAMING_SNAKE_CASE = "."
def __a(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCAmelCase = f.readlines()
# Find the start prompt.
_lowerCAmelCase = 0
while not lines[start_index].startswith(SCREAMING_SNAKE_CASE_ ):
start_index += 1
start_index += 1
_lowerCAmelCase = start_index
while not lines[end_index].startswith(SCREAMING_SNAKE_CASE_ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_SCREAMING_SNAKE_CASE = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
_SCREAMING_SNAKE_CASE = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_SCREAMING_SNAKE_CASE = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_SCREAMING_SNAKE_CASE = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
_SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , SCREAMING_SNAKE_CASE_ )
return [m.group(0 ) for m in matches]
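# Added example (illustrative, not in the original file): the regex splits on
# lowercase->uppercase and acronym boundaries, e.g.
#   camel_case_split("TFBertForConditionalGeneration")
#   -> ["TF", "Bert", "For", "Conditional", "Generation"]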
def __a(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
_lowerCAmelCase = 2 if text == "✅" or text == "❌" else len(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = (width - text_length) // 2
_lowerCAmelCase = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def __a():
'''simple docstring'''
_lowerCAmelCase = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_lowerCAmelCase = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_lowerCAmelCase = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = collections.defaultdict(SCREAMING_SNAKE_CASE_ )
# Let's look through all transformers objects (once).
for attr_name in dir(SCREAMING_SNAKE_CASE_ ):
_lowerCAmelCase = None
if attr_name.endswith("Tokenizer" ):
_lowerCAmelCase = slow_tokenizers
_lowerCAmelCase = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_lowerCAmelCase = fast_tokenizers
_lowerCAmelCase = attr_name[:-13]
elif _re_tf_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
_lowerCAmelCase = tf_models
_lowerCAmelCase = _re_tf_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_flax_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
_lowerCAmelCase = flax_models
_lowerCAmelCase = _re_flax_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
elif _re_pt_models.match(SCREAMING_SNAKE_CASE_ ) is not None:
_lowerCAmelCase = pt_models
_lowerCAmelCase = _re_pt_models.match(SCREAMING_SNAKE_CASE_ ).groups()[0]
if lookup_dict is not None:
while len(SCREAMING_SNAKE_CASE_ ) > 0:
if attr_name in model_name_to_prefix.values():
_lowerCAmelCase = True
break
# Try again after removing the last word in the name
_lowerCAmelCase = "".join(camel_case_split(SCREAMING_SNAKE_CASE_ )[:-1] )
# Let's build that table!
_lowerCAmelCase = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_lowerCAmelCase = ["Model", "Tokenizer slow", "Tokenizer fast", "PyTorch support", "TensorFlow support", "Flax Support"]
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_lowerCAmelCase = [len(SCREAMING_SNAKE_CASE_ ) + 2 for c in columns]
_lowerCAmelCase = max([len(SCREAMING_SNAKE_CASE_ ) for name in model_names] ) + 2
# Build the table per se
_lowerCAmelCase = "|" + "|".join([_center_text(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for c, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ) + "|\n"
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_lowerCAmelCase = {True: "✅", False: "❌"}
for name in model_names:
_lowerCAmelCase = model_name_to_prefix[name]
_lowerCAmelCase = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for l, w in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ) + "|\n"
return table
def __a(SCREAMING_SNAKE_CASE_ : Union[str, Any]=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = _find_text_in_file(
filename=os.path.join(SCREAMING_SNAKE_CASE_ , "index.md" ) , start_prompt="<!--This table is updated automatically from the auto modules" , end_prompt="<!-- End table-->" , )
_lowerCAmelCase = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(SCREAMING_SNAKE_CASE_ , "index.md" ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
check_model_table(args.fix_and_overwrite)
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowercase__ ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , "num_encoder_blocks" ) )
class lowercase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=13 , _UpperCAmelCase : Optional[int]=64 , _UpperCAmelCase : str=3 , _UpperCAmelCase : Tuple=4 , _UpperCAmelCase : int=[2, 2, 2, 2] , _UpperCAmelCase : int=[8, 4, 2, 1] , _UpperCAmelCase : List[Any]=[16, 32, 64, 128] , _UpperCAmelCase : Union[str, Any]=[1, 4, 8, 16] , _UpperCAmelCase : Dict=[1, 2, 4, 8] , _UpperCAmelCase : Dict=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : str=3 , _UpperCAmelCase : List[Any]=None , ) -> int:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = num_encoder_blocks
UpperCAmelCase_ = sr_ratios
UpperCAmelCase_ = depths
UpperCAmelCase_ = hidden_sizes
UpperCAmelCase_ = downsampling_rates
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = scope
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : str ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = SegformerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
UpperCAmelCase_ = UpperCAmelCase_ = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowercase__ ( self : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SegformerForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def lowercase__ ( self : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = 1
UpperCAmelCase_ = SegformerForSemanticSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertGreater(result.loss , 0.0 )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Any ) -> str:
'''simple docstring'''
UpperCAmelCase_ = SegformerModelTester(self )
UpperCAmelCase_ = SegformerConfigTester(self , config_class=_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowercase__ ( self : List[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*_UpperCAmelCase )
@unittest.skip("SegFormer does not use inputs_embeds" )
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
pass
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.attentions
UpperCAmelCase_ = sum(self.model_tester.depths )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
UpperCAmelCase_ = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCAmelCase_ = (self.model_tester.image_size // 32) ** 2
UpperCAmelCase_ = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
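# Added worked numbers (using this tester's defaults: image_size=64 and
# sr_ratios=[8, 4, 2, 1]): the first block checks seq_len = (64 // 4) ** 2 = 256
# against a reduced length of (64 // (4 * 8)) ** 2 = 4, while for the last block
# both lengths come out to (64 // 32) ** 2 = 4.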
UpperCAmelCase_ = len(_UpperCAmelCase )
# Check attention is always last and order is fine
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + 1 , len(_UpperCAmelCase ) )
UpperCAmelCase_ = outputs.attentions
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first attentions (first block, first layer)
UpperCAmelCase_ = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase_ = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def lowercase__ ( self : Dict ) -> int:
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] ):
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = self.model_tester.num_encoder_blocks
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
if model_class in get_values(_UpperCAmelCase ):
continue
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
pass
@slow
def lowercase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SegformerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowercase__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
UpperCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_UpperCAmelCase )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )
UpperCAmelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-4 ) )
@slow
def lowercase__ ( self : Optional[int] ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
UpperCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(_UpperCAmelCase )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )
UpperCAmelCase_ = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , _UpperCAmelCase , atol=1e-1 ) )
@slow
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=_UpperCAmelCase , align=_UpperCAmelCase , do_random_crop=_UpperCAmelCase )
UpperCAmelCase_ = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
_UpperCAmelCase )
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=_UpperCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = encoded_inputs.pixel_values.to(_UpperCAmelCase )
with torch.no_grad():
UpperCAmelCase_ = model(_UpperCAmelCase )
UpperCAmelCase_ = outputs.logits.detach().cpu()
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase , target_sizes=[(500, 300)] )
UpperCAmelCase_ = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
UpperCAmelCase_ = image_processor.post_process_semantic_segmentation(outputs=_UpperCAmelCase )
UpperCAmelCase_ = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , _UpperCAmelCase )
def UpperCamelCase ( __lowerCamelCase : int = 1 , __lowerCamelCase : int = 1000 ):
snake_case : int = 1
snake_case : int = 0
for divide_by_number in range(__lowerCamelCase , digit + 1 ):
snake_case : list[int] = []
snake_case : Optional[int] = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(has_been_divided ):
snake_case : List[Any] = len(has_been_divided )
snake_case : List[str] = divide_by_number
else:
has_been_divided.append(__lowerCamelCase )
snake_case : Any = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
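# Added sketch (an alternative formulation, not from the original file): the length
# of the recurring cycle of 1/d is the gap between the first repeated remainder and
# its first occurrence, e.g. 1/7 = 0.(142857) has cycle length 6.
def recurring_cycle_length(denominator: int) -> int:
    seen: dict[int, int] = {}
    remainder, position = 1, 0
    while remainder and remainder not in seen:
        seen[remainder] = position
        remainder = remainder * 10 % denominator
        position += 1
    return 0 if remainder == 0 else position - seen[remainder]

assert recurring_cycle_length(7) == 6
assert recurring_cycle_length(2) == 0  # terminating decimal, no cycle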
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = analyze_text(_lowerCamelCase )
lowerCamelCase__ : Optional[Any] = list(' ' + ascii_lowercase )
# what is our total sum of probabilities.
lowerCamelCase__ : List[Any] = sum(single_char_strings.values() )
# one length string
lowerCamelCase__ : str = 0
# for each letter of the alphabet, look it up in the dict and, if present, add its entropy contribution
for ch in my_alphas:
if ch in single_char_strings:
lowerCamelCase__ : Tuple = single_char_strings[ch]
lowerCamelCase__ : Union[str, Any] = my_str / all_sum
my_fir_sum += prob * math.log2(prob ) # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
lowerCamelCase__ : Dict = sum(two_char_strings.values() )
lowerCamelCase__ : str = 0
# for each two-letter sequence, accumulate its entropy contribution.
for cha in my_alphas:
for chb in my_alphas:
lowerCamelCase__ : int = cha + chb
if sequence in two_char_strings:
lowerCamelCase__ : int = two_char_strings[sequence]
lowerCamelCase__ : Tuple = int(my_str ) / all_sum
my_sec_sum += prob * math.log2(prob )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase_ ( _lowerCamelCase ):
lowerCamelCase__ : List[str] = Counter() # type: ignore
lowerCamelCase__ : List[Any] = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0 , len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase_ ( ):
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
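# Added sketch (self-contained, not part of the original file): the first-order
# quantity printed above is the Shannon entropy H = -sum(p * log2(p)) over the
# character distribution. The same computation written compactly:
import math as _math
from collections import Counter as _Counter

def _char_entropy(text: str) -> float:
    counts = _Counter(text)
    total = sum(counts.values())
    return -sum((n / total) * _math.log2(n / total) for n in counts.values())

assert abs(_char_entropy("aabb") - 1.0) < 1e-12  # two equiprobable symbols -> 1 bit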
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Tuple = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : str = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
A_ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
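# Added sketch (illustrative only; transformers' actual _LazyModule handles many more
# cases): the pattern above defers importing heavy backends until an attribute is
# first accessed. A toy version of the same idea:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        if attr in self._attr_to_submodule:
            module = importlib.import_module(f".{self._attr_to_submodule[attr]}", self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")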
'''simple docstring'''
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _UpperCamelCase (_lowerCamelCase : int , _lowerCamelCase : str )-> List[str]:
'''simple docstring'''
__snake_case = old_name
if "patch_embed" in old_name:
__snake_case , __snake_case , __snake_case = old_name.split('''.''' )
if layer == "0":
__snake_case = old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
__snake_case = old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
__snake_case = old_name.replace('''3''' , '''convolution2''' )
else:
__snake_case = old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , _lowerCamelCase ):
__snake_case = R'''\b\d{2}\b'''
if bool(re.search(_lowerCamelCase , _lowerCamelCase ) ):
__snake_case = re.search(R'''\d\.\d\d.''' , _lowerCamelCase ).group()
else:
__snake_case = re.search(R'''\d\.\d.''' , _lowerCamelCase ).group()
if int(match[0] ) < 6:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
__snake_case = trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
__snake_case = '''intermediate_stages.''' + trimmed_name
else:
__snake_case = old_name.replace(_lowerCamelCase , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
__snake_case = trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
__snake_case = str(int(match[2] ) - num_meta4D_last_stage )
__snake_case = trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
__snake_case = trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
__snake_case = trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
__snake_case = trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
__snake_case = trimmed_name.replace('''fc2''' , '''linear_out''' )
__snake_case = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , _lowerCamelCase ):
__snake_case = old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
__snake_case = new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
__snake_case = new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
__snake_case = new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
__snake_case = new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
__snake_case = new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
__snake_case = new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
__snake_case = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
__snake_case = new_name.replace('''norm''' , '''layernorm''' )
__snake_case = '''efficientformer.''' + new_name
else:
__snake_case = '''efficientformer.encoder.''' + new_name
return new_name
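# Added example (hypothetical key, for illustration): under the renames above, a stem
# parameter such as "patch_embed.0.weight" becomes
# "efficientformer.patch_embed.convolution1.weight".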
def _UpperCamelCase (_lowerCamelCase : str , _lowerCamelCase : Tuple )-> List[str]:
'''simple docstring'''
for key in checkpoint.copy().keys():
__snake_case = checkpoint.pop(_lowerCamelCase )
__snake_case = val
return checkpoint
def _UpperCamelCase ()-> Tuple:
'''simple docstring'''
__snake_case = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw )
return image
def _UpperCamelCase (_lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : Path , _lowerCamelCase : bool )-> Optional[Any]:
'''simple docstring'''
__snake_case = torch.load(_lowerCamelCase , map_location='''cpu''' )['''model''']
__snake_case = EfficientFormerConfig.from_json_file(_lowerCamelCase )
__snake_case = EfficientFormerForImageClassificationWithTeacher(_lowerCamelCase )
__snake_case = '''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
__snake_case = config.depths[-1] - config.num_meta3d_blocks + 1
__snake_case = convert_torch_checkpoint(_lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
__snake_case = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
__snake_case = prepare_img()
__snake_case = 2_56
__snake_case = 2_24
__snake_case = EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
__snake_case = processor(images=_lowerCamelCase , return_tensors='''pt''' ).pixel_values
# original processing pipeline
__snake_case = Compose(
[
Resize(_lowerCamelCase , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(_lowerCamelCase ),
ToTensor(),
Normalize(_lowerCamelCase , _lowerCamelCase ),
] )
__snake_case = image_transforms(_lowerCamelCase ).unsqueeze(0 )
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
__snake_case = model(_lowerCamelCase )
__snake_case = outputs.logits
__snake_case = (1, 10_00)
if "l1" in model_name:
__snake_case = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
__snake_case = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _lowerCamelCase , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
__snake_case = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
print(f'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(_lowerCamelCase )
print(f'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=_lowerCamelCase , )
processor.push_to_hub(
repo_id=f'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=_lowerCamelCase , )
if __name__ == "__main__":
UpperCAmelCase_ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __a ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
a__ = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
a__ = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(__UpperCAmelCase ):
os.makedirs(__UpperCAmelCase )
a__ = model.state_dict()
def to_tf_var_name(__UpperCAmelCase ):
for patt, repl in iter(__UpperCAmelCase ):
a__ = name.replace(__UpperCAmelCase , __UpperCAmelCase )
return f"bert/{name}"
def create_tf_var(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
a__ = tf.dtypes.as_dtype(tensor.dtype )
a__ = tf.get_variable(dtype=__UpperCAmelCase , shape=tensor.shape , name=__UpperCAmelCase , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(__UpperCAmelCase )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
a__ = to_tf_var_name(__UpperCAmelCase )
a__ = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
a__ = torch_tensor.T
a__ = create_tf_var(tensor=__UpperCAmelCase , name=__UpperCAmelCase , session=__UpperCAmelCase )
tf.keras.backend.set_value(__UpperCAmelCase , __UpperCAmelCase )
a__ = session.run(__UpperCAmelCase )
print(f"Successfully created {tf_name}: {np.allclose(__UpperCAmelCase , __UpperCAmelCase )}" )
a__ = tf.train.Saver(tf.trainable_variables() )
saver.save(__UpperCAmelCase , os.path.join(__UpperCAmelCase , model_name.replace('''-''' , '''_''' ) + '''.ckpt''' ) )
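# Added example (illustrative): with the replacement patterns above, a PyTorch key
# like "encoder.layer.0.attention.self.query.weight" becomes the TF variable name
# "bert/encoder/layer_0/attention/self/query/kernel", and its tensor is transposed
# because it matches an entry in tensors_to_transpose.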
def __a ( __UpperCAmelCase=None ):
a__ = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''model name e.g. bert-base-uncased''' )
parser.add_argument(
'''--cache_dir''' , type=__UpperCAmelCase , default=__UpperCAmelCase , required=__UpperCAmelCase , help='''Directory containing pytorch model''' )
parser.add_argument('''--pytorch_model_path''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''/path/to/<pytorch-model-name>.bin''' )
parser.add_argument('''--tf_cache_dir''' , type=__UpperCAmelCase , required=__UpperCAmelCase , help='''Directory in which to save tensorflow model''' )
a__ = parser.parse_args(__UpperCAmelCase )
a__ = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=__UpperCAmelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Dict = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class UpperCamelCase__ ( __lowerCAmelCase ):
lowerCAmelCase__ : Dict = "perceiver"
def __init__( self : List[str] , lowerCamelCase : List[str]=2_5_6 , lowerCamelCase : List[Any]=1_2_8_0 , lowerCamelCase : int=7_6_8 , lowerCamelCase : str=1 , lowerCamelCase : List[Any]=2_6 , lowerCamelCase : int=8 , lowerCamelCase : List[Any]=8 , lowerCamelCase : Union[str, Any]=None , lowerCamelCase : str=None , lowerCamelCase : Union[str, Any]="kv" , lowerCamelCase : Dict=1 , lowerCamelCase : Any=1 , lowerCamelCase : List[Any]="gelu" , lowerCamelCase : List[str]=0.1 , lowerCamelCase : List[Any]=0.02 , lowerCamelCase : List[Any]=1e-12 , lowerCamelCase : Tuple=True , lowerCamelCase : int=2_6_2 , lowerCamelCase : int=2_0_4_8 , lowerCamelCase : Optional[Any]=5_6 , lowerCamelCase : Tuple=[3_6_8, 4_9_6] , lowerCamelCase : Optional[Any]=1_6 , lowerCamelCase : Optional[int]=1_9_2_0 , lowerCamelCase : Optional[int]=1_6 , lowerCamelCase : List[str]=[1, 1_6, 2_2_4, 2_2_4] , **lowerCamelCase : Tuple , ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
a__ = num_latents
a__ = d_latents
a__ = d_model
a__ = num_blocks
a__ = num_self_attends_per_block
a__ = num_self_attention_heads
a__ = num_cross_attention_heads
a__ = qk_channels
a__ = v_channels
a__ = cross_attention_shape_for_attention
a__ = self_attention_widening_factor
a__ = cross_attention_widening_factor
a__ = hidden_act
a__ = attention_probs_dropout_prob
a__ = initializer_range
a__ = layer_norm_eps
a__ = use_query_residual
# masked language modeling attributes
a__ = vocab_size
a__ = max_position_embeddings
# image classification attributes
a__ = image_size
# flow attributes
a__ = train_size
# multimodal autoencoding attributes
a__ = num_frames
a__ = audio_samples_per_frame
a__ = samples_per_patch
a__ = output_shape
class UpperCamelCase__ ( __lowerCAmelCase ):
@property
def __a ( self : Tuple ):
'''simple docstring'''
if self.task == "multiple-choice":
a__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
a__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("inputs", dynamic_axis),
("attention_mask", dynamic_axis),
] )
@property
def __a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def __a ( self : List[str] , lowerCamelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : int = -1 , lowerCamelCase : bool = False , lowerCamelCase : Optional[TensorType] = None , lowerCamelCase : int = 3 , lowerCamelCase : int = 4_0 , lowerCamelCase : int = 4_0 , ):
'''simple docstring'''
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(lowerCamelCase , lowerCamelCase ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a__ = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a__ = preprocessor.num_special_tokens_to_add(lowerCamelCase )
a__ = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
a__ = [" ".join(["a"] ) * seq_length] * batch_size
a__ = dict(preprocessor(lowerCamelCase , return_tensors=lowerCamelCase ) )
a__ = inputs.pop("input_ids" )
return inputs
elif isinstance(lowerCamelCase , lowerCamelCase ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a__ = compute_effective_axis_dimension(lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch )
a__ = self._generate_dummy_images(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
a__ = dict(preprocessor(images=lowerCamelCase , return_tensors=lowerCamelCase ) )
a__ = inputs.pop("pixel_values" )
return inputs
else:
raise ValueError(
"Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." )
'''simple docstring'''
import math
def _lowerCamelCase (__lowerCamelCase : list , __lowerCamelCase : int = 0 , __lowerCamelCase : int = 0 ) -> list:
a__ = end or len(__lowerCamelCase )
for i in range(__lowerCamelCase , __lowerCamelCase ):
a__ = i
a__ = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
a__ = array[temp_index - 1]
temp_index -= 1
a__ = temp_index_value
return array
def _lowerCamelCase (__lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int ) -> None: # Max Heap
a__ = index
a__ = 2 * index + 1 # Left Node
a__ = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
a__ = left_index
if right_index < heap_size and array[largest] < array[right_index]:
a__ = right_index
if largest != index:
a__ , a__ = array[largest], array[index]
heapify(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase (__lowerCamelCase : list ) -> list:
a__ = len(__lowerCamelCase )
for i in range(n // 2 , -1 , -1 ):
heapify(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
for i in range(n - 1 , 0 , -1 ):
a__ , a__ = array[0], array[i]
heapify(__lowerCamelCase , 0 , __lowerCamelCase )
return array
def _lowerCamelCase (__lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def _lowerCamelCase (__lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> int:
a__ = low
a__ = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
a__ , a__ = array[j], array[i]
i += 1
def _lowerCamelCase (__lowerCamelCase : list ) -> list:
if len(__lowerCamelCase ) == 0:
return array
a__ = 2 * math.ceil(math.log2(len(__lowerCamelCase ) ) )
a__ = 16
return intro_sort(__lowerCamelCase , 0 , len(__lowerCamelCase ) , __lowerCamelCase , __lowerCamelCase )
def _lowerCamelCase (__lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : int ) -> list:
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(__lowerCamelCase )
max_depth -= 1
a__ = median_of_a(__lowerCamelCase , __lowerCamelCase , start + ((end - start) // 2) + 1 , end - 1 )
a__ = partition(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
intro_sort(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a__ = p
return insertion_sort(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCAmelCase_ : Optional[int] = input("Enter numbers separated by a comma : ").strip()
lowerCAmelCase_ : List[str] = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
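# Added note: this is introsort — quicksort around a median-of-three pivot, falling
# back to heap sort once the recursion depth exceeds 2 * ceil(log2(n)) and to
# insertion sort for slices shorter than the size threshold of 16.
# e.g. sort([5, 3, 1, 4, 2]) -> [1, 2, 3, 4, 5]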
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self , __A ):
if isinstance(__A , __A ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
__a = deepcopy(__A )
elif os.path.exists(__A ):
with io.open(__A , """r""" , encoding="""utf-8""" ) as f:
__a = json.load(__A )
else:
try:
__a = base64.urlsafe_b64decode(__A ).decode("""utf-8""" )
__a = json.loads(__A )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f'''Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}''' )
__a = config
self.set_stage_and_offload()
def snake_case_ ( self ):
# zero stage - this is done as early as possible, before model is created, to allow
# ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
# during ``zero.Init()`` which needs to know the dtype, and some other hparams.
__a = self.get_value("""zero_optimization.stage""" , -1 )
# offload
__a = False
if self.is_zero2() or self.is_zero3():
__a = set(["""cpu""", """nvme"""] )
__a = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
__a = True
def snake_case_ ( self , __A ):
__a = self.config
# find the config node of interest if it exists
__a = ds_key_long.split(""".""" )
__a = nodes.pop()
for node in nodes:
__a = config.get(__A )
if config is None:
return None, ds_key
return config, ds_key
def snake_case_ ( self , __A , __A=None ):
__a , __a = self.find_config_node(__A )
if config is None:
return default
return config.get(__A , __A )
def snake_case_ ( self , __A , __A=False ):
__a = self.config
# find the config node of interest if it exists
__a = ds_key_long.split(""".""" )
for node in nodes:
__a = config
__a = config.get(__A )
if config is None:
if must_exist:
raise ValueError(f'''Can\'t find {ds_key_long} entry in the config: {self.config}''' )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__A )
def snake_case_ ( self , __A ):
__a = self.get_value(__A )
return False if value is None else bool(__A )
def snake_case_ ( self , __A ):
__a = self.get_value(__A )
return False if value is None else not bool(__A )
def snake_case_ ( self ):
return self._stage == 2
def snake_case_ ( self ):
return self._stage == 3
def snake_case_ ( self ):
return self._offload
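# Added example (illustrative): the dotted-key helpers above walk the nested config
# dict, so with config = {"zero_optimization": {"stage": 3}}:
#   get_value("zero_optimization.stage", -1)            -> 3
#   get_value("zero_optimization.offload_param.device") -> None (key path absent)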
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self , __A ):
__a = engine
def snake_case_ ( self , __A , **__A ):
# runs backpropagation and handles mixed precision
self.engine.backward(__A , **__A )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class __UpperCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , __A ):
super().__init__(__A , device_placement=__A , scaler=__A )
__a = hasattr(self.optimizer , """overflow""" )
def snake_case_ ( self , __A=None ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def snake_case_ ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def snake_case_ ( self ):
if self.__has_overflow__:
return self.optimizer.overflow
return False
class __UpperCAmelCase ( __A ):
"""simple docstring"""
def __init__( self , __A , __A ):
super().__init__(__A , __A )
def snake_case_ ( self ):
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self , __A , __A=0.001 , __A=0 , **__A ):
__a = params
__a = lr
__a = weight_decay
__a = kwargs
class __UpperCAmelCase :
"""simple docstring"""
def __init__( self , __A , __A=None , __A=0 , **__A ):
__a = optimizer
__a = total_num_steps
__a = warmup_num_steps
__a = kwargs
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
UpperCamelCase__ : List[Any] = pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def __UpperCAmelCase ( lowerCamelCase_ : Any , lowerCamelCase_ : int ) -> Any:
"""simple docstring"""
inspect_dataset(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = path + '.py'
assert script_name in os.listdir(lowerCamelCase_ )
assert "__pycache__" not in os.listdir(lowerCamelCase_ )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def __UpperCAmelCase ( lowerCamelCase_ : Optional[int] , lowerCamelCase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
inspect_metric(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = path + '.py'
assert script_name in os.listdir(lowerCamelCase_ )
assert "__pycache__" not in os.listdir(lowerCamelCase_ )
@pytest.mark.parametrize(
'path, config_name, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = get_dataset_config_info(lowerCamelCase_ , config_name=lowerCamelCase_ )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int ) -> List[Any]:
"""simple docstring"""
with pytest.raises(lowerCamelCase_ ):
get_dataset_config_info(lowerCamelCase_ , config_name=lowerCamelCase_ )
@pytest.mark.parametrize(
'path, expected' , [
('squad', 'plain_text'),
('acronym_identification', 'default'),
('lhoestq/squad', 'plain_text'),
('lhoestq/test', 'default'),
('lhoestq/demo1', 'lhoestq--demo1'),
('dalle-mini/wit', 'dalle-mini--wit'),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_dataset_config_names(lowerCamelCase_ )
assert expected in config_names
@pytest.mark.parametrize(
'path, expected_configs, expected_splits_in_first_config' , [
('squad', ['plain_text'], ['train', 'validation']),
('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = get_dataset_infos(lowerCamelCase_ )
assert list(infos.keys() ) == expected_configs
SCREAMING_SNAKE_CASE_ : Optional[Any] = expected_configs[0]
assert expected_config in infos
SCREAMING_SNAKE_CASE_ : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'path, expected_config, expected_splits' , [
('squad', 'plain_text', ['train', 'validation']),
('dalle-mini/wit', 'dalle-mini--wit', ['train']),
('paws', 'labeled_final', ['train', 'test', 'validation']),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_dataset_infos(lowerCamelCase_ )
assert expected_config in infos
SCREAMING_SNAKE_CASE_ : Tuple = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'path, config_name, expected_exception' , [
('paws', None, ValueError),
] , )
def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : int ) -> int:
"""simple docstring"""
with pytest.raises(lowerCamelCase_ ):
get_dataset_split_names(lowerCamelCase_ , config_name=lowerCamelCase_ )
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class lowerCamelCase_ ( __a , __a ):
@register_to_config
def __init__( self : Tuple , _A : int = 128 , _A : int = 256 , _A : float = 2_000.0 , _A : int = 768 , _A : int = 12 , _A : int = 12 , _A : int = 64 , _A : int = 2_048 , _A : float = 0.1 , ):
'''simple docstring'''
super().__init__()
UpperCAmelCase__ : List[Any] = nn.Sequential(
nn.Linear(_A , d_model * 4 , bias=_A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=_A ) , nn.SiLU() , )
UpperCAmelCase__ : str = nn.Embedding(_A , _A )
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = nn.Linear(_A , _A , bias=_A )
UpperCAmelCase__ : List[str] = nn.Dropout(p=_A )
UpperCAmelCase__ : str = nn.ModuleList()
for lyr_num in range(_A ):
# FiLM conditional T5 decoder
UpperCAmelCase__ : List[str] = DecoderLayer(d_model=_A , d_kv=_A , num_heads=_A , d_ff=_A , dropout_rate=_A )
self.decoders.append(_A )
UpperCAmelCase__ : List[Any] = TaLayerNorm(_A )
UpperCAmelCase__ : Optional[int] = nn.Dropout(p=_A )
UpperCAmelCase__ : Optional[Any] = nn.Linear(_A , _A , bias=_A )
def lowercase_ ( self : List[Any] , _A : List[str] , _A : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device),
            (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)

        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        inputs = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            inputs = lyr(
                inputs,
                conditioning_emb=conditioning_emb,
                encoder_hidden_states=encoded,
                encoder_attention_mask=encoder_decoder_mask,
            )[0]

        decoded = self.decoder_norm(inputs)
        decoded = self.post_dropout(decoded)

        spec_out = self.spec_out(decoded)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(
        self, d_model: int, d_kv: int, num_heads: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float = 1e-6
    ):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model,
                d_kv=d_kv,
                num_heads=num_heads,
                dropout_rate=dropout_rate,
                layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )
    def forward(
        self,
        hidden_states,
        conditioning_emb=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        encoder_decoder_position_bias=None,
    ):
        hidden_states = self.layer[0](hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask)

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1e10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float):
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model: int, d_kv: int, num_heads: int, dropout_rate: float, layer_norm_epsilon: float):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states,
            encoder_hidden_states=key_value_states,
            attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout_rate: float, layer_norm_epsilon: float):
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model: int, d_ff: int, dropout_rate: float):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # Gated GELU: the gate branch goes through wi_0 + GELU, the linear branch through wi_1.
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)

        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-6):
        """T5-style layer norm: scale only, no mean subtraction and no bias (a.k.a. RMSNorm)."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # The variance is always computed in float32 for numerical stability.
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert back into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # Tanh approximation of GELU, as used in Google BERT and GPT-2.
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    """FiLM layer: feature-wise affine transformation conditioned on an embedding."""

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
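# A minimal shape-level smoke test for the decoder above (a sketch; all names
# refer to the classes defined in this file, sizes are illustrative):
#
#   model = T5FilmDecoder(input_dims=8, targets_length=16, d_model=32, num_layers=1, num_heads=2, d_kv=16, d_ff=64)
#   tokens = torch.randn(2, 16, 8)      # (batch, targets_length, input_dims)
#   noise_t = torch.rand(2)             # diffusion time in [0, 1)
#   enc = torch.randn(2, 10, 32)        # one encoder stream, d_model wide
#   enc_mask = torch.ones(2, 10)
#   out = model(encodings_and_masks=[(enc, enc_mask)], decoder_input_tokens=tokens, decoder_noise_time=noise_t)
#   assert out.shape == (2, 16, 8)      # spec_out projects back to input_dims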
| 721
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
UpperCamelCase__ = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f"`args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
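# Usage sketch for the pipelines above (assumes network access to download a
# checkpoint; the model names are illustrative, any seq2seq checkpoint works):
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#   print(summarizer("Long article text ...", max_length=60)[0]["summary_text"])
#
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   print(translator("How are you?")[0]["translation_text"])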
| 312
| 0
|
def solution(limit: int = 50_000_000) -> int:
    """
    Project Euler 87: count the numbers below `limit` expressible as the sum of a
    prime square, a prime cube and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the numbers up to sqrt(limit - 24).
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # Iterate in increasing order so the early `break`s below are valid.
    sorted_primes = sorted(primes)
    for prime1 in sorted_primes:
        square = prime1 * prime1
        for prime2 in sorted_primes:
            cube = prime2 * prime2 * prime2
            # The smallest prime fourth power is 2**4 == 16.
            if square + cube >= limit - 16:
                break
            for prime3 in sorted_primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F'''{solution() = }''')
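# Sanity check based on the problem statement: below fifty there are exactly
# four such numbers (28, 33, 47 and 49), so the following should hold:
#
#   assert solution(50) == 4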
| 60
|
from __future__ import annotations
class Matrix:
    """A matrix of ints/floats supporting the usual linear-algebra operations."""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
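# Usage sketch for the Matrix class above (behavior follows the methods defined
# in this file; note that scalar multiplication truncates each entry to int):
#
#   m = Matrix([[1, 2], [3, 4]])
#   assert m.order == (2, 2)
#   assert m.determinant() == -2
#   assert m ** 0 == m.identity()
#   assert m * m == m ** 2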
| 108
| 0
|
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = r'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 700
|
import unittest

from transformers import DonutProcessor


DONUT_PRETRAINED_MODEL_NAME = "naver-clova-ix/donut-base"


class DonutProcessorTest(unittest.TestCase):
    def setUp(self):
        self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)

    def test_token2json(self):
        expected_json = {
            "name": "John Doe",
            "age": "99",
            "city": "Atlanta",
            "state": "GA",
            "zip": "30301",
            "phone": "123-4567",
            "nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
        }

        sequence = (
            "<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
            "<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
            "<s_nicknames><s_nickname>Johnny</s_nickname>"
            "<sep/><s_nickname>JD</s_nickname></s_nicknames>"
        )
        actual_json = self.processor.token2json(sequence)

        self.assertDictEqual(actual_json, expected_json)
| 219
| 0
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
_lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Transcribe the audio with Whisper; the transcription becomes the prompt.
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : Tuple = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase : Union[str, Any] = torch.randn(__lowerCamelCase ,generator=__lowerCamelCase ,device='cpu' ,dtype=__lowerCamelCase ).to(
self.device )
else:
_lowerCAmelCase : Tuple = torch.randn(__lowerCamelCase ,generator=__lowerCamelCase ,device=self.device ,dtype=__lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase : Optional[int] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCAmelCase : List[str] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : List[Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase : Any = {}
if accepts_eta:
_lowerCAmelCase : Tuple = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
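# Usage sketch (assumes suitable checkpoints are available; `waveform` is a 1-D
# audio array and the checkpoint names are illustrative):
#
#   import torch
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",
#       custom_pipeline="speech_to_image_diffusion",
#       speech_model=WhisperForConditionalGeneration.from_pretrained("openai/whisper-small"),
#       speech_processor=WhisperProcessor.from_pretrained("openai/whisper-small"),
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   image = pipe(audio=waveform, sampling_rate=16_000).images[0]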
| 259
|
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        r"""
        Classic tests: we check that the model can be converted to BetterTransformer,
        reversed, saved, reloaded, and that generation output is unchanged.
        """
        model_id = "hf-internal-testing/tiny-random-t5"

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        r"""
        `save_pretrained` should raise a ValueError while the model is in
        BetterTransformer mode; saving must work again after reversing.
        """
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
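# Usage sketch of the BetterTransformer integration exercised above (requires
# the `optimum` package installed; the model name is illustrative):
#
#   model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#   model = model.to_bettertransformer()        # swap in fused attention kernels
#   out = model.generate(**tokenizer("Hello", return_tensors="pt"))
#   model = model.reverse_bettertransformer()   # required before save_pretrained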
| 16
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])

        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
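# Usage sketch for the pipeline under test (checkpoint name taken from the slow
# tests above; the edit prompt and guidance values are illustrative):
#
#   pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix")
#   edited = pipe(prompt="turn him into a cyborg", image=init_image, image_guidance_scale=1.0).images[0]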
| 644
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression( self ) -> Union[str, Any]:
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
    def test_example_progression_unequal_three_mid_and_reset( self ) -> Dict:
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
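# --- Added usage sketch (not from the original test file) ---
# Outside the unittest harness the same API drives constrained beam search: the
# constraint completes as soon as ANY one candidate sequence has been generated.
def _example_disjunctive_progression():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]] )
    for token in (1, 2, 4):
        stepped , completed , reset = dc.update(token )
    assert completed and dc.current_seq == [1, 2, 4]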
| 644
| 1
|
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self ):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 436
|
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig :
    '''simple docstring'''
    def __init__( self , config_file_or_dict ):
        if isinstance(config_file_or_dict , dict ):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict )
        elif os.path.exists(config_file_or_dict ):
            with io.open(config_file_or_dict , "r" , encoding="utf-8" ) as f:
                config = json.load(f )
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict ).decode("utf-8" )
                config = json.loads(config_decoded )
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload( self ):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage" , -1 )
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"] )
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device" ),
                    self.get_value("zero_optimization.offload_param.device" ),
                ] )
            if len(offload_devices & offload_devices_valid ) > 0:
                self._offload = True
    def find_config_node( self , ds_key_long ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split("." )
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node )
            if config is None:
                return None, ds_key
        return config, ds_key
    def get_value( self , ds_key_long , default=None ):
        config , ds_key = self.find_config_node(ds_key_long )
        if config is None:
            return default
        return config.get(ds_key , default )
    def del_config_sub_tree( self , ds_key_long , must_exist=False ):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split("." )
        for node in nodes:
            parent_config = config
            config = config.get(node )
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node )
    def is_true( self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else bool(value )
    def is_false( self , ds_key_long ):
        value = self.get_value(ds_key_long )
        return False if value is None else not bool(value )
    def is_zero2( self ):
        return self._stage == 2
    def is_zero3( self ):
        return self._stage == 3
    def is_offload( self ):
        return self._offload
class DeepSpeedEngineWrapper :
    '''simple docstring'''
    def __init__( self , engine ):
        self.engine = engine
    def backward( self , loss , **kwargs ):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss , **kwargs )
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.
class DeepSpeedOptimizerWrapper ( AcceleratedOptimizer ):
    '''simple docstring'''
    def __init__( self , optimizer ):
        super().__init__(optimizer , device_placement=False , scaler=None )
        self.__has_overflow__ = hasattr(self.optimizer , "overflow" )
    def zero_grad( self , set_to_none=None ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    def step( self ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
    @property
    def is_overflow( self ):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False
class DeepSpeedSchedulerWrapper ( AcceleratedScheduler ):
    '''simple docstring'''
    def __init__( self , scheduler , optimizers ):
        super().__init__(scheduler , optimizers )
    def step( self ):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class DummyOptim :
    '''simple docstring'''
    def __init__( self , params , lr=0.001 , weight_decay=0 , **kwargs ):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs
class DummyScheduler :
    '''simple docstring'''
    def __init__( self , optimizer , total_num_steps=None , warmup_num_steps=0 , **kwargs ):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
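# --- Added usage sketch (not from the original module) ---
# HfDeepSpeedConfig accepts a dict, a path to a JSON file, or a base64-encoded
# JSON string; dotted keys address nested config nodes.
def _example_usage():
    ds_config = HfDeepSpeedConfig(
        {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}} )
    assert ds_config.is_zero3()
    assert ds_config.is_offload()
    assert ds_config.get_value("zero_optimization.offload_param.device") == "cpu"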
| 436
| 1
|
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        # fmt: off
        vocab_tokens = ["""こん""", """こんに""", """にちは""", """ばんは""", """世界,㔺界""", """、""", """。""", """<BR>""", """<SP>""", """<TAB>""", """<URL>""", """<EMAIL>""", """<TEL>""", """<DATE>""", """<PRICE>""", """<BLOCK>""", """<KIGOU>""", """<U2000U2BFF>""", """<|emoji1|>""", """<unk>""", """<|bagoftoken|>""", """<|endoftext|>"""]
        # fmt: on
        emoji_tokens = {"""emoji""": {"""\ud83d\ude00""": """<|emoji1|>"""}, """emoji_inv""": {"""<|emoji1|>""": """\ud83d\ude00"""}}  # 😀
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.emoji_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""emoji_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        with open(self.emoji_file , """w""" ) as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens ) )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = """こんにちは、世界。 \nこんばんは、㔺界。😀"""
        output_text = """こんにちは、世界。 \nこんばんは、世界。😀"""
        return input_text, output_text
    def get_clean_sequence( self , tokenizer ):
        '''simple docstring'''
        input_text , output_text = self.get_input_output_texts(tokenizer )
        ids = tokenizer.encode(output_text , add_special_tokens=False )
        text = tokenizer.decode(ids , clean_up_tokenization_spaces=False )
        return text, ids
    def test_pretokenized_inputs( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_maximum_encoding_length_pair_input( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_maximum_encoding_length_single_input( self ):
        '''simple docstring'''
        pass  # TODO add if relevant
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、世界。 こんばんは、㔺界。"""
        expected_token = ["""こん""", """にちは""", """、""", """世界""", """。""", """<SP>""", """こん""", """ばんは""", """、""", """㔺界""", """。"""]
        tokens = tokenizer.tokenize(input_text )
        self.assertListEqual(tokens , expected_token )
        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(input_ids , expected_ids )
        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens )
        self.assertListEqual(input_ids , expected_ids )
    def test_token_bagging( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = """こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"""
        expected_text = """こんにちは、、、、世界。こんばんは、、、、世界。"""
        tokens = tokenizer.encode(input_text )
        output_text = tokenizer.decode(tokens )
        self.assertEqual(output_text , expected_text )
@slow
    def test_prefix_input( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        prefix_text = """こんにちは、世界。"""
        input_text = """こんばんは、㔺界。😀"""
        expected_text = """こんにちは、世界。こんばんは、世界。😀"""
        tokens_1 = tokenizer.encode(prefix_text + input_text )
        tokens_2 = tokenizer.encode("""""" , prefix_text=prefix_text + input_text )
        tokens_3 = tokenizer.encode(input_text , prefix_text=prefix_text )
        output_text_1 = tokenizer.decode(tokens_1 )
        output_text_2 = tokenizer.decode(tokens_2 )
        output_text_3 = tokenizer.decode(tokens_3 )
        self.assertEqual(output_text_1 , expected_text )
        self.assertEqual(output_text_2 , expected_text )
        self.assertEqual(output_text_3 , expected_text )
@slow
    def test_token_type_ids( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        # Testing tokenization
        prefix_text = """こんにちは、世界。"""
        input_text = """こんばんは、㔺界。😀"""
        len_prefix = len(tokenizer.encode(prefix_text ) ) - 2
        len_text = len(tokenizer.encode(input_text ) ) - 2
        expected_mask_1 = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_2 = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_3 = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_id_1 = tokenizer(prefix_text + input_text ).token_type_ids
        type_id_2 = tokenizer("""""" , prefix_text=prefix_text + input_text ).token_type_ids
        type_id_3 = tokenizer(input_text , prefix_text=prefix_text ).token_type_ids
        self.assertListEqual(type_id_1 , expected_mask_1 )
        self.assertListEqual(type_id_2 , expected_mask_2 )
        self.assertListEqual(type_id_3 , expected_mask_3 )
@slow
    def test_prefix_tokens( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        x_token_1 = tokenizer.encode("""あンいワ""" )
        x_token_2 = tokenizer.encode("""""" , prefix_text="""あンいワ""" )
        x_token_3 = tokenizer.encode("""いワ""" , prefix_text="""あン""" )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_2 ) )
        self.assertEqual(tokenizer.decode(x_token_1 ) , tokenizer.decode(x_token_3 ) )
        self.assertNotEqual(x_token_1 , x_token_2 )
        self.assertNotEqual(x_token_1 , x_token_3 )
        self.assertEqual(x_token_2[1] , x_token_2[-1] )  # SEG token
        self.assertEqual(x_token_3[1] , x_token_3[3] )  # SEG token
@slow
    def test_batch_encode( self ):
        '''simple docstring'''
        tokenizer = self.tokenizer_class.from_pretrained("""Tanrei/GPTSAN-japanese""" )
        input_pairs = [["""武田信玄""", """は、"""], ["""織田信長""", """の配下の、"""]]
        x_token = tokenizer(input_pairs , padding=True )
        x_token_2 = tokenizer.batch_encode_plus(input_pairs , padding=True )
        # fmt: off
        expected_outputs = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , expected_outputs )
        self.assertListEqual(x_token.token_type_ids , expected_typeids )
        self.assertListEqual(x_token.attention_mask , expected_attmask )
        self.assertListEqual(x_token_2.input_ids , expected_outputs )
        self.assertListEqual(x_token_2.token_type_ids , expected_typeids )
        self.assertListEqual(x_token_2.attention_mask , expected_attmask )
    def test_conversion_reversible( self ):
        '''simple docstring'''
        pass
    def test_padding_different_model_input_name( self ):
        '''simple docstring'''
        pass
| 291
|
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
SAMPLE_ROBERTA_CONFIG = get_tests_dir("""fixtures/dummy-config.json""")
class AutoConfigTest ( unittest.TestCase ):
    '''simple docstring'''
    def setUp( self ):
        '''simple docstring'''
__snake_case :Optional[int] = 0
    def test_module_spec( self ):
        '''simple docstring'''
        self.assertIsNotNone(transformers.models.auto.__spec__ )
        self.assertIsNotNone(importlib.util.find_spec("""transformers.models.auto""" ) )
    def test_config_from_model_shortcut( self ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained("""bert-base-uncased""" )
        self.assertIsInstance(config , BertConfig )
    def test_config_model_type_from_local_file( self ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained(SAMPLE_ROBERTA_CONFIG )
        self.assertIsInstance(config , RobertaConfig )
    def test_config_model_type_from_model_identifier( self ):
        '''simple docstring'''
        config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER )
        self.assertIsInstance(config , RobertaConfig )
    def test_config_for_model_str( self ):
        '''simple docstring'''
        config = AutoConfig.for_model("""roberta""" )
        self.assertIsInstance(config , RobertaConfig )
    def test_pattern_matching_fallback( self ):
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # This model name contains bert and roberta, but roberta ends up being picked.
            folder = os.path.join(tmp_dir , """fake-roberta""" )
            os.makedirs(folder , exist_ok=True )
            with open(os.path.join(folder , """config.json""" ) , """w""" ) as f:
                f.write(json.dumps({} ) )
            config = AutoConfig.from_pretrained(folder )
            self.assertEqual(type(config ) , RobertaConfig )
    def test_new_config_registration( self ):
        '''simple docstring'''
        try:
            AutoConfig.register("""custom""" , CustomConfig )
            # Wrong model type will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("""model""" , CustomConfig )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoConfig.register("""bert""" , BertConfig )
            # Now that the config is registered, it can be used as any other config with the auto-API
            config = CustomConfig()
            with tempfile.TemporaryDirectory() as tmp_dir:
                config.save_pretrained(tmp_dir )
                new_config = AutoConfig.from_pretrained(tmp_dir )
                self.assertIsInstance(new_config , CustomConfig )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
    def test_repo_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , """bert-base is not a local folder and is not a valid model identifier""" ):
            config = AutoConfig.from_pretrained("""bert-base""" )
    def test_revision_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ):
            config = AutoConfig.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="""aaaaaa""" )
    def test_configuration_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , """hf-internal-testing/no-config-test-repo does not appear to have a file named config.json.""" , ):
            config = AutoConfig.from_pretrained("""hf-internal-testing/no-config-test-repo""" )
    def test_from_pretrained_dynamic_config( self ):
        '''simple docstring'''
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=False )
        config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=True )
        self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
        # Test config can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            reloaded_config = AutoConfig.from_pretrained(tmp_dir , trust_remote_code=True )
            self.assertEqual(reloaded_config.__class__.__name__ , """NewModelConfig""" )
    def test_from_pretrained_dynamic_config_conflict( self ):
        '''simple docstring'''
        class NewModelConfigLocal ( BertConfig ):
            '''simple docstring'''
            model_type = "new-model"
        try:
            AutoConfig.register("""new-model""" , NewModelConfigLocal )
            # If remote code is not set, the default is to use local
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" )
            self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
            # If remote code is disabled, we load the local one.
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=False )
            self.assertEqual(config.__class__.__name__ , """NewModelConfigLocal""" )
            # If remote is enabled, we load from the Hub
            config = AutoConfig.from_pretrained("""hf-internal-testing/test_dynamic_model""" , trust_remote_code=True )
            self.assertEqual(config.__class__.__name__ , """NewModelConfig""" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
| 291
| 1
|
"""simple docstring"""
def ugly_numbers( n ):
    ugly_nums = [1]
    i2 , i3 , i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5
    for _ in range(1 , n ):
        next_num = min(next_2 , next_3 , next_5 )
        ugly_nums.append(next_num )
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(200) = }")
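    # --- Added worked example (not from the original file) ---
    # The sequence starts 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12.
    assert ugly_numbers(10) == 12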
| 82
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        image = floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        original_image = floats_tensor((1, 3, 16, 16) , rng=random.Random(seed ) ).to(device )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def test_xformers_attention_forwardGenerator_pass( self ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def test_save_load_float16( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1 )
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def test_save_load_local( self ):
        self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 315
| 0
|
def compute_ap( l ):  # noqa: E741
    '''simple docstring'''
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs( root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
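# --- Added usage example (not from the original file) ---
# In a path graph every interior vertex is an articulation point, so this
# second call prints only the middle vertex, 1.
compute_ap({0: [1], 1: [0, 2], 2: [1]})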
| 693
|
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("""GET""" ,"""https://huggingface.co""" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("""GET""" ,"""https://huggingface.co""" ,timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error():
    '''simple docstring'''
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("""GET""" ,"""https://huggingface.co""" )
def test_offline_with_datasets_offline_mode_enabled():
    '''simple docstring'''
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("""https://huggingface.co""" )
| 693
| 1
|
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
__lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext( path ):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f'''Unable to determine file format from file extension {path}. '''
        f'''Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}''' )
def run_command_factory( args ):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )
class RunCommand ( BaseTransformersCLICommand ):
    def __init__( self , nlp: Pipeline , reader: PipelineDataFormat ):
        self._nlp = nlp
        self._reader = reader
    @staticmethod
    def register_subcommand( parser: ArgumentParser ):
        run_parser = parser.add_parser("run" , help="Run a pipeline through the CLI" )
        run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
        run_parser.add_argument("--input" , type=str , help="Path to the file to use for inference" )
        run_parser.add_argument("--output" , type=str , help="Path to the file that will be used post to write results." )
        run_parser.add_argument("--model" , type=str , help="Name or path to the model to instantiate." )
        run_parser.add_argument("--config" , type=str , help="Name or path to the model's config to instantiate." )
        run_parser.add_argument(
            "--tokenizer" , type=str , help="Name of the tokenizer to use. (default: same as the model name)" )
        run_parser.add_argument(
            "--column" , type=str , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
        run_parser.add_argument(
            "--format" , type=str , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
        run_parser.add_argument(
            "--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
        run_parser.set_defaults(func=run_command_factory )
    def run( self ):
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(f'''Current pipeline requires output to be in binary format, saving at {binary_path}''' )
        else:
            self._reader.save(outputs )
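# --- Added usage note (not from the original module) ---
# The subcommand registered above backs invocations such as (flag values are
# illustrative):
#   transformers-cli run --task text-classification --input data.csv \
#       --column text --format csv --output out.json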
| 684
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get( k ):
    return getitem, k
def _set( k , v ):
    return setitem, k, v
def _del( k ):
    return delitem, k
def _run_operation( obj , fun , *args ):
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)
_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]
_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]
_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict( operations ):
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res , my_exc = _run_operation(my , fun , *args )
        py_res , py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_was_added_to_api():
    def is_public( name ) -> bool:
        return not name.startswith("_" )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
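# --- Added usage sketch (not from the original test file) ---
# The operation tuples above can also be replayed by hand; `_set` returns
# (operator.setitem, key, value), so applying it mutates the map in place.
def _example_manual_replay():
    my = HashMap(initial_block_size=4)
    for fun, *args in _add_items:
        fun(my, *args)
    assert my["key_a"] == "val_a" and my["key_b"] == "val_b"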
| 684
| 1
|
from functools import lru_cache
def unique_prime_factors( n ):
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len( num ):
    '''simple docstring'''
    return len(unique_prime_factors(num ) )
def equality( lst ):
    '''simple docstring'''
    return len(set(lst ) ) in (0, 1)
def run( n ):
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution( n = 4 ):
    '''simple docstring'''
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
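    # --- Added worked example (not from the original file) ---
    # For n=2 the answer is 14: 14 = 2*7 and 15 = 3*5 each have two distinct prime factors.
    assert solution(2) == 14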
| 719
|
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList ( HashTable ):
    '''simple docstring'''
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
    def _set_value( self , key , data ):
        """simple docstring"""
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data )
        self._keys[key] = self.values[key]
    def balanced_factor( self ):
        """simple docstring"""
        return (
            sum(self.charge_factor - len(slot ) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )
    def _collision_resolution( self , key , data=None ):
        """simple docstring"""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key , data )
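# --- Added usage sketch (not from the original module) ---
# A minimal sketch, assuming HashTable's `insert_data` API: with size_table=3,
# the values 10, 13 and 16 all hash to bucket 1, so they chain in one deque
# instead of probing to other slots.
#
# table = HashTableWithLinkedList(3)
# for value in (10, 13, 16):
#     table.insert_data(value)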
| 76
| 0
|
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve :
    """simple docstring"""
    def __init__( self , list_of_points: list[tuple[float, float]] ):
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points ) - 1
    def basis_function( self , t: float ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values = []
        for i in range(len(self.list_of_points ) ):
            # basis function for each i
            output_values.append(
                comb(self.degree , i ) * ((1 - t) ** (self.degree - i)) * (t**i) )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values ) , 5 ) == 1
        return output_values
    def bezier_curve_function( self , t: float ):
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        basis_function = self.basis_function(t )
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points ) ):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)
    def plot_curve( self , step_size: float = 0.01 ):
        from matplotlib import pyplot as plt  # type: ignore
        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot
        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t )
            to_plot_x.append(value[0] )
            to_plot_y.append(value[1] )
            t += step_size
        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]
        plt.plot(
            to_plot_x , to_plot_y , color='''blue''' , label='''Curve of Degree ''' + str(self.degree ) , )
        plt.scatter(x , y , color='''red''' , label='''Control Points''' )
        plt.legend()
        plt.show()
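# --- Added worked example (not from the original file) ---
# For the degree-1 curve through (1, 2) and (3, 5), t = 0.5 is the midpoint:
assert BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5) == (2.0, 3.5)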
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
| 41
|
"""simple docstring"""
def heaps( arr ) -> list:
    if len(arr ) <= 1:
        return [tuple(arr )]
    res = []
    def generate( n , arr ):
        c = [0] * n
        res.append(tuple(arr ) )
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0] , arr[i] = arr[i] , arr[0]
                else:
                    arr[c[i]] , arr[i] = arr[i] , arr[c[i]]
                res.append(tuple(arr ) )
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr ) , arr )
    return res
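# --- Added worked example (not from the original file) ---
# Heap's algorithm yields all n! orderings, so 3 elements give 6 tuples:
assert sorted(heaps([1, 2, 3])) == sorted(
    [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
)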
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    arr = [int(item) for item in user_input.split(""",""")]
print(heaps(arr))
| 543
| 0
|
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest ( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True
    def setUp( self ):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer( self ):
        tokenizer = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text )  # , add_prefix_space=True)
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def _lowercase ( self : List[Any] ):
snake_case__ : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=a_ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=a_ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
snake_case__ : Union[str, Any] = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
snake_case__ : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
snake_case__ : Tuple = tokenizer.encode(
"sequence builders" , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case__ : Optional[Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case__ : List[str] = tokenizer.build_inputs_with_special_tokens(a_ )
snake_case__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Dict = """Encode this sequence."""
snake_case__ : Optional[Any] = tokenizer.byte_encoder[""" """.encode("utf-8" )[0]]
# Testing encoder arguments
snake_case__ : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a_ , a_ )
snake_case__ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a_ , a_ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case__ : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a_ , a_ )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"mask_token": AddedToken(a_ , lstrip=a_ , rstrip=a_ )} ) # mask token has a left space
snake_case__ : str = tokenizer.convert_tokens_to_ids(a_ )
snake_case__ : int = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : List[str] = tokenizer.encode(a_ )
snake_case__ : Optional[int] = encoded.index(a_ )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a_ , a_ )
snake_case__ : Union[str, Any] = tokenizer.encode(a_ )
snake_case__ : Optional[int] = encoded.index(a_ )
snake_case__ : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a_ , a_ )
def _lowercase ( self : Tuple ):
pass
def _lowercase ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ : str = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : Tuple = tokenizer_r.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
snake_case__ : Optional[int] = tokenizer_p.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case__ : int = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case__ : int = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
a_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
a_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _lowercase ( self : List[str] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , a_ )
self.assertEqual(post_processor_state["add_prefix_space"] , a_ )
self.assertEqual(post_processor_state["trim_offsets"] , a_ )
def _lowercase ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ : Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : int = f'''{text_of_1_token} {text_of_1_token}'''
snake_case__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : str = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Union[str, Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Tuple = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Union[str, Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Any = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
snake_case__ : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Optional[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
| 705
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class JaxFormatter ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
    """simple docstring"""
    def __init__( self , features=None , device=None , **jnp_array_kwargs ):
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f'''Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` '''
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`." )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f'''Device with string identifier {self.device} not listed among the available '''
                f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
                f'''device: {str(jax.devices()[0] )}.''' )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ):
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
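# A small standalone sketch (not part of the formatter above) of the dtype rule
# applied in `_tensorize`: jax defaults to 32-bit precision unless the
# `jax_enable_x64` config flag is set, so integer columns come back as int32 or
# int64 accordingly.
import jax
import jax.numpy as jnp


def default_int_dtype():
    # Mirrors the integer branch in `_tensorize` above.
    return jnp.int64 if jax.config.jax_enable_x64 else jnp.int32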
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Return v/c after validating the input speed."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Return the Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x-axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    """Apply the Lorentz boost to an event four-vector (symbolic by default)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29_979_245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16)
        pipe.to("cuda")
        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy").images[0]
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy")
        assert np.abs((expected_image - image).max()) < 5e-1
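# Hedged usage sketch for the pipeline exercised above (a CUDA GPU and a large
# checkpoint download are assumed):
# pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
# `token_indices` selects the prompt tokens whose cross-attention maps are
# iteratively "excited" during sampling, e.g. the nouns in "a cat and a frog":
# image = pipe(prompt="a cat and a frog", token_indices=[2, 5], num_inference_steps=50).images[0]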
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'configuration_gpt_neo': ['GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoConfig', 'GPTNeoOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoForCausalLM',
'GPTNeoForQuestionAnswering',
'GPTNeoForSequenceClassification',
'GPTNeoForTokenClassification',
'GPTNeoModel',
'GPTNeoPreTrainedModel',
'load_tf_weights_in_gpt_neo',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'FlaxGPTNeoForCausalLM',
'FlaxGPTNeoModel',
'FlaxGPTNeoPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
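# Why the indirection above: `_LazyModule` replaces the package module in
# `sys.modules` so heavy backends (torch, flax) are only imported when one of
# the names in `_import_structure` is first accessed. A minimal sketch of the
# same idea using only the stdlib (class and attribute names here are
# illustrative, not the real transformers API):
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Import the defining submodule only on first attribute access.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)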
def is_power_of_two(number: int) -> bool:
    """Return True if `number` is a power of two (note: 0 also passes this test)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
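# Why `number & (number - 1) == 0` works: a power of two has exactly one set
# bit (8 == 0b1000), and subtracting 1 turns that bit off and sets every lower
# bit (7 == 0b0111), so the AND of the two is zero. Note that 0 also satisfies
# the test (0 & -1 == 0); callers needing strict powers of two should add a
# `number > 0` check. Quick check against the function above:
# assert is_power_of_two(8) and not is_power_of_two(6)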
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase_ : Optional[int] = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class TranslationTool(PipelineTool):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text: str, src_lang: str, tgt_lang: str):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
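# Hedged usage sketch (assumes the transformers agents/tools runtime is
# available; the NLLB checkpoint download is several GB):
# translator = TranslationTool()
# translator("Bonjour, comment allez-vous ?", src_lang="French", tgt_lang="English")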
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Audio', init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don't have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ")
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                "You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. ")
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self):
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
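# Minimal usage sketch for the feature above (requires `soundfile`; the values
# are illustrative):
# audio = Audio(sampling_rate=16_000)
# encoded = audio.encode_example({"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000})
# `encoded` is {"bytes": <wav-encoded bytes>, "path": None}; `decode_example`
# reverses it back to an array plus its sampling rate.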
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix supporting +, -, *, transpose and the Sherman-Morrison update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another) -> Matrix:
        return self + (-another)

    def __mul__(self, another) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
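# Sherman-Morrison, as implemented above: if A is invertible and
# 1 + v^T A^{-1} u != 0, then
#     (A + u v^T)^{-1} = A^{-1} - (A^{-1} u v^T A^{-1}) / (1 + v^T A^{-1} u)
# `self` is assumed to already hold A^{-1}, so the method returns the inverse
# of the rank-one update (A + u v^T) without a fresh O(n^3) inversion.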
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator, datasets, train_idxs, valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions, references=references,
                )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
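# To launch the cross-validation example above (flags follow the argparse
# definitions in `main`; the script name is illustrative):
#   accelerate launch cross_validation.py --num_folds 5 --mixed_precision fp16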
import warnings
from ..trainer import Trainer
from ..utils import logging
logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}


class RoCBertConfig(PretrainedConfig):
    model_type = '''roc_bert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
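# In practice this formatter is selected through the datasets formatting API,
# e.g. (sketch; `ds` is any `datasets.Dataset`):
# ds = ds.with_format("jax", device=str(jax.devices()[0]))
# Indexing `ds[0]` then returns a dict of jax Arrays instead of Python objects.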
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
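# For example (URL shape follows the Hub's `resolve` scheme; repo name is
# illustrative):
# hf_hub_url("user/data", "train.csv", revision="main")
# -> "https://huggingface.co/datasets/user/data/resolve/main/train.csv"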
"""simple docstring"""
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {'''a''': 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {'''a''': 2, '''b''': True})
        self.assertDictEqual(MockClass(a=2, c=2.2_5).to_kwargs(), {'''a''': 2, '''c''': 2.2_5})

    @require_cuda
    def test_grad_scaler_kwargs(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision='''fp16''', kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1_0_2_4.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ['''torchrun''', f"""--nproc_per_node={torch.cuda.device_count()}""", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ''''''
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
"""GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GraphormerForGraphClassification""",
"""GraphormerModel""",
"""GraphormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/canine-s": "https://huggingface.co/google/canine-s/resolve/main/config.json",
    # See all CANINE models at https://huggingface.co/models?filter=canine
}


class CanineConfig(PretrainedConfig):
    model_type = '''canine'''

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_6384,
        type_vocab_size=16,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=0xE000,
        eos_token_id=0xE001,
        downsampling_rate=4,
        upsampling_kernel_size=4,
        num_hash_functions=8,
        num_hash_buckets=1_6384,
        local_transformer_stride=128,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 587
|
def solution(n: int = 4_000_000) -> int:
    """Returns the sum of all even Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
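# Worked example: for solution(100) the loop generates
# 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144 and sums the even terms that
# do not exceed 100, i.e. 2 + 8 + 34 = 44.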
if __name__ == "__main__":
print(F'''{solution() = }''')
| 587
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__( self , vocab_size=30522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1e-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.")

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
| 32
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
@property
def _lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : str = 3
lowerCAmelCase__ : Dict = (3_2, 3_2)
lowerCAmelCase__ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
return image
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
torch.manual_seed(0 )
lowerCAmelCase__ : Any = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
return model
@property
def _lowerCAmelCase ( self : int ):
torch.manual_seed(0 )
lowerCAmelCase__ : int = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowerCAmelCase ( self : Tuple ):
torch.manual_seed(0 )
lowerCAmelCase__ : List[str] = RobertaSeriesConfig(
hidden_size=3_2 , project_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_0_0_6 , )
return RobertaSeriesModelWithTransformation(_lowercase )
@property
def _lowerCAmelCase ( self : List[Any] ):
def extract(*_lowercase : Union[str, Any] , **_lowercase : List[Any] ):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()
return extract
def _lowerCAmelCase ( self : Dict ):
lowerCAmelCase__ : Any = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : List[str] = self.dummy_cond_unet
lowerCAmelCase__ : Optional[Any] = PNDMScheduler(skip_prk_steps=_lowercase )
lowerCAmelCase__ : Tuple = self.dummy_vae
lowerCAmelCase__ : int = self.dummy_text_encoder
lowerCAmelCase__ : Any = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCAmelCase__ : Optional[int] = 7_7
lowerCAmelCase__ : str = self.dummy_image.to(_lowercase )
lowerCAmelCase__ : Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : Tuple = AltDiffusionImgaImgPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ : Tuple = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowercase )
lowerCAmelCase__ : str = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
lowerCAmelCase__ : List[str] = "A painting of a squirrel eating a burger"
lowerCAmelCase__ : str = torch.Generator(device=_lowercase ).manual_seed(0 )
lowerCAmelCase__ : Optional[Any] = alt_pipe(
[prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_lowercase , )
lowerCAmelCase__ : Tuple = output.images
lowerCAmelCase__ : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
lowerCAmelCase__ : List[str] = alt_pipe(
[prompt] , generator=_lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=_lowercase , return_dict=_lowercase , )[0]
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase__ : List[str] = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCAmelCase ( self : Tuple ):
lowerCAmelCase__ : Optional[Any] = self.dummy_cond_unet
lowerCAmelCase__ : List[Any] = PNDMScheduler(skip_prk_steps=_lowercase )
lowerCAmelCase__ : Optional[int] = self.dummy_vae
lowerCAmelCase__ : Tuple = self.dummy_text_encoder
lowerCAmelCase__ : str = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowerCAmelCase__ : List[Any] = 7_7
lowerCAmelCase__ : Optional[int] = self.dummy_image.to(_lowercase )
# put models in fp16
lowerCAmelCase__ : Optional[int] = unet.half()
lowerCAmelCase__ : Optional[int] = vae.half()
lowerCAmelCase__ : int = bert.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ : str = AltDiffusionImgaImgPipeline(
unet=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , safety_checker=_lowercase , feature_extractor=self.dummy_extractor , )
lowerCAmelCase__ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=_lowercase )
lowerCAmelCase__ : Dict = alt_pipe.to(_lowercase )
alt_pipe.set_progress_bar_config(disable=_lowercase )
lowerCAmelCase__ : Union[str, Any] = "A painting of a squirrel eating a burger"
lowerCAmelCase__ : Any = torch.manual_seed(0 )
lowerCAmelCase__ : Union[str, Any] = alt_pipe(
[prompt] , generator=_lowercase , num_inference_steps=2 , output_type="np" , image=_lowercase , ).images
assert image.shape == (1, 3_2, 3_2, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowerCAmelCase ( self : str ):
lowerCAmelCase__ : Union[str, Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase__ : List[Any] = init_image.resize((7_6_0, 5_0_4) )
lowerCAmelCase__ : str = "BAAI/AltDiffusion"
lowerCAmelCase__ : Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
lowerCAmelCase__ : List[Any] = "A fantasy landscape, trending on artstation"
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(0 )
lowerCAmelCase__ : Any = pipe(
prompt=_lowercase , image=_lowercase , strength=0.75 , guidance_scale=7.5 , generator=_lowercase , output_type="np" , )
lowerCAmelCase__ : Optional[Any] = output.images[0]
lowerCAmelCase__ : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 7_6_0, 3)
lowerCAmelCase__ : Tuple = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowerCAmelCase__ : Union[str, Any] = init_image.resize((7_6_8, 5_1_2) )
lowerCAmelCase__ : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowerCAmelCase__ : Dict = "BAAI/AltDiffusion"
lowerCAmelCase__ : str = AltDiffusionImgaImgPipeline.from_pretrained(
_lowercase , safety_checker=_lowercase , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
lowerCAmelCase__ : Any = "A fantasy landscape, trending on artstation"
lowerCAmelCase__ : List[str] = torch.manual_seed(0 )
lowerCAmelCase__ : Tuple = pipe(
prompt=_lowercase , image=_lowercase , strength=0.75 , guidance_scale=7.5 , generator=_lowercase , output_type="np" , )
lowerCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 7_6_8, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 308
| 0
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cast in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 707
|
'''simple docstring'''
def is_palindrome(n: int) -> bool:
    return str(n) == str(n)[::-1]


def sum_reverse(n: int) -> int:
    return int(n) + int(str(n)[::-1])


def solution(limit: int = 1_0000) -> int:
    lychrel_nums = []
    for num in range(1, limit):
        iterations = 0
        a = num
        while iterations < 50:
            a = sum_reverse(a)
            iterations += 1
            if is_palindrome(a):
                break
        else:
            lychrel_nums.append(num)
    return len(lychrel_nums)
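# Worked example: 349 reaches a palindrome in three iterations
# (349 + 943 = 1292, 1292 + 2921 = 4213, 4213 + 3124 = 7337), so it is not
# counted; candidates like 196 never produce a palindrome within the
# 50-iteration cap and are appended to lychrel_nums.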
if __name__ == "__main__":
print(F'''{solution() = }''')
| 4
| 0
|
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
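# A minimal usage sketch (the file name "data.pkl" is illustrative):
#
#   import pandas as pd
#   from datasets import load_dataset
#
#   pd.DataFrame({"a": [1, 2, 3]}).to_pickle("data.pkl")
#   ds = load_dataset("pandas", data_files="data.pkl", split="train")
#
# The builder above then reads each pickle with pd.read_pickle and converts
# it to an Arrow table via pa.Table.from_pandas.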
| 42
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_time_series_transformer""": [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TimeSeriesTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"""TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TimeSeriesTransformerForPrediction""",
"""TimeSeriesTransformerModel""",
"""TimeSeriesTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 178
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
@slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
| 289
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", []) or []
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words_ids = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 289
| 1
|
def multiplication_table(number: int, number_of_terms: int) -> str:
    """Returns the multiplication table of `number` up to `number_of_terms` lines."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )
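# Example: multiplication_table(5, 3) returns
# "5 * 1 = 5\n5 * 2 = 10\n5 * 3 = 15".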
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 30
|
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
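# Worked example: binary_and(25, 32) zero-fills 11001 and 100000 to the same
# width, ANDs them position by position (011001 & 100000), and returns
# "0b000000".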
if __name__ == "__main__":
import doctest
doctest.testmod()
| 664
| 0
|
"""simple docstring"""
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count = defaultdict(int)
    # For each character in the input strings,
    # increment count in the corresponding dict entry
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
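# Worked example: check_anagrams("Silent night", "Listen thing") normalizes
# both inputs to 11 lowercase letters whose per-character counts cancel to
# zero, so it returns True.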
if __name__ == "__main__":
from doctest import testmod
testmod()
UpperCAmelCase : Tuple = input("Enter the first string ").strip()
UpperCAmelCase : List[str] = input("Enter the second string ").strip()
UpperCAmelCase : Optional[int] = check_anagrams(input_a, input_b)
print(F"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
| 718
|
"""simple docstring"""
def solution(limit: int = 28123) -> int:
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
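# With the default limit of 28123 (the classical bound above which every
# integer is expressible as a sum of two abundant numbers), solution()
# returns 4179871, the answer to Project Euler problem 23.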
if __name__ == "__main__":
print(solution())
| 100
| 0
|
'''simple docstring'''
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
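# Worked example: resistor_parallel([3.21, 5.6, 4]) ≈ 1.3512 ohms, while
# resistor_series([3.21, 5.6, 4]) == 12.81 ohms.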
if __name__ == "__main__":
import doctest
doctest.testmod()
| 267
|
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
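# Worked examples: is_arithmetic_series([2, 4, 6]) -> True (common
# difference 2), is_arithmetic_series([2, 4, 7]) -> False, and
# arithmetic_mean([2, 4, 6]) -> 4.0.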
if __name__ == "__main__":
import doctest
doctest.testmod()
| 267
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 721
|
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
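# Worked example: radix_sort([170, 45, 75, 90, 2]) distributes values into
# buckets by ones, tens and hundreds digits in turn and returns
# [2, 45, 75, 90, 170].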
if __name__ == "__main__":
import doctest
doctest.testmod()
| 145
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
__SCREAMING_SNAKE_CASE = False
@skip_mps
class a__ ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase__ = StableDiffusionAttendAndExcitePipeline
UpperCAmelCase__ = False
UpperCAmelCase__ = TEXT_TO_IMAGE_PARAMS
UpperCAmelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
UpperCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase_ : int =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_lowerCamelCase , )
UpperCamelCase_ : List[Any] =DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=_lowerCamelCase , set_alpha_to_one=_lowerCamelCase , )
torch.manual_seed(0 )
UpperCamelCase_ : Tuple =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCamelCase_ : Optional[Any] =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
UpperCamelCase_ : Any =CLIPTextModel(_lowerCamelCase )
UpperCamelCase_ : Optional[int] =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
UpperCamelCase_ : Union[str, Any] ={
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase_ ( self :Any , _lowerCamelCase :List[Any] , _lowerCamelCase :Any=0 ):
'''simple docstring'''
if str(_lowerCamelCase ).startswith('mps' ):
UpperCamelCase_ : int =torch.manual_seed(_lowerCamelCase )
else:
UpperCamelCase_ : Optional[int] =torch.Generator(device=_lowerCamelCase ).manual_seed(_lowerCamelCase )
UpperCamelCase_ : Dict ={
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] ='cpu'
UpperCamelCase_ : str =self.get_dummy_components()
UpperCamelCase_ : Dict =self.pipeline_class(**_lowerCamelCase )
pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
UpperCamelCase_ : int =self.get_dummy_inputs(_lowerCamelCase )
UpperCamelCase_ : Optional[Any] =pipe(**_lowerCamelCase ).images
UpperCamelCase_ : Tuple =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
UpperCamelCase_ : Dict =np.array(
[0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] )
UpperCamelCase_ : List[str] =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCamelCase , 1E-3 )
def lowerCamelCase_ ( self :Any ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCamelCase_ ( self :Union[str, Any] ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
def lowerCamelCase_ ( self :Optional[Any] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5E-4 )
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class a__ ( unittest.TestCase ):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def lowerCamelCase_ ( self :str ):
'''simple docstring'''
UpperCamelCase_ : Dict =torch.manual_seed(51 )
UpperCamelCase_ : Dict =StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=_lowerCamelCase , torch_dtype=torch.floataa )
pipe.to('cuda' )
UpperCamelCase_ : Dict ='a painting of an elephant with glasses'
UpperCamelCase_ : Optional[int] =[5, 7]
UpperCamelCase_ : Optional[int] =pipe(
prompt=_lowerCamelCase , token_indices=_lowerCamelCase , guidance_scale=7.5 , generator=_lowerCamelCase , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
UpperCamelCase_ : Optional[int] =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5E-1
| 357
|
"""simple docstring"""
from manim import *
class a__ ( A__ ):
def lowerCamelCase_ ( self :List[Any] ):
'''simple docstring'''
UpperCamelCase_ : Tuple =Rectangle(height=0.5 , width=0.5 )
UpperCamelCase_ : Any =Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
UpperCamelCase_ : Any =[mem.copy() for i in range(6 )]
UpperCamelCase_ : Optional[int] =[mem.copy() for i in range(6 )]
UpperCamelCase_ : List[str] =VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
UpperCamelCase_ : Optional[int] =VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
UpperCamelCase_ : Union[str, Any] =VGroup(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
UpperCamelCase_ : str =Text('CPU' , font_size=24 )
UpperCamelCase_ : str =Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_lowerCamelCase )
UpperCamelCase_ : int =[mem.copy() for i in range(4 )]
UpperCamelCase_ : Any =VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
UpperCamelCase_ : List[Any] =Text('GPU' , font_size=24 )
UpperCamelCase_ : Any =Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
gpu.move_to([-1, -1, 0] )
self.add(_lowerCamelCase )
UpperCamelCase_ : List[str] =[mem.copy() for i in range(6 )]
UpperCamelCase_ : str =VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
UpperCamelCase_ : Optional[int] =Text('Model' , font_size=24 )
UpperCamelCase_ : List[Any] =Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , buff=0.5 , aligned_edge=_lowerCamelCase )
model.move_to([3, -1.0, 0] )
self.add(_lowerCamelCase )
UpperCamelCase_ : int =[]
for i, rect in enumerate(_lowerCamelCase ):
rect.set_stroke(_lowerCamelCase )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
UpperCamelCase_ : str =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_lowerCamelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_lowerCamelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_lowerCamelCase , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_lowerCamelCase , buff=0.0 )
self.add(_lowerCamelCase )
cpu_targs.append(_lowerCamelCase )
UpperCamelCase_ : List[str] =[mem.copy() for i in range(6 )]
UpperCamelCase_ : Any =VGroup(*_lowerCamelCase ).arrange(_lowerCamelCase , buff=0 )
UpperCamelCase_ : Optional[int] =Text('Loaded Checkpoint' , font_size=24 )
UpperCamelCase_ : List[Any] =Group(_lowerCamelCase , _lowerCamelCase ).arrange(_lowerCamelCase , aligned_edge=_lowerCamelCase , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
UpperCamelCase_ : int =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase_ : Optional[Any] =MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_lowerCamelCase , _lowerCamelCase )
UpperCamelCase_ : Any =MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(_lowerCamelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
UpperCamelCase_ : Dict =MarkupText(
f'''Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_lowerCamelCase ) , Write(_lowerCamelCase ) )
self.play(Write(_lowerCamelCase , run_time=1 ) , Create(_lowerCamelCase , run_time=1 ) )
UpperCamelCase_ : Union[str, Any] =[]
UpperCamelCase_ : List[Any] =[]
for i, rect in enumerate(_lowerCamelCase ):
UpperCamelCase_ : str =fill.copy().set_fill(_lowerCamelCase , opacity=0.7 )
target.move_to(_lowerCamelCase )
first_animations.append(GrowFromCenter(_lowerCamelCase , run_time=1 ) )
UpperCamelCase_ : Any =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_lowerCamelCase , run_time=1.5 ) )
self.play(*_lowerCamelCase )
self.play(*_lowerCamelCase )
self.wait()
| 357
| 1
|
"""simple docstring"""
def solution() -> int:
    """Returns the number of Sundays that fell on the first of the month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays
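# The walk above advances seven days at a time and normalizes month/year
# overflow by hand; counting the month-starts that land on a Sunday between
# 1901 and 2000 yields 171 (Project Euler problem 19).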
if __name__ == "__main__":
print(solution())
| 705
|
"""simple docstring"""
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
SCREAMING_SNAKE_CASE__ = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation="relu"))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation="relu"))
classifier.add(layers.Dense(units=1, activation="sigmoid"))
# Compiling the CNN
classifier.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
SCREAMING_SNAKE_CASE__ = train_datagen.flow_from_directory(
"dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
SCREAMING_SNAKE_CASE__ = test_datagen.flow_from_directory(
"dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save("cnn.h5")
# Part 3 - Making new predictions
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.load_img(
"dataset/single_prediction/image.png", target_size=(64, 64)
)
SCREAMING_SNAKE_CASE__ = tf.keras.preprocessing.image.img_to_array(test_image)
SCREAMING_SNAKE_CASE__ = np.expand_dims(test_image, axis=0)
SCREAMING_SNAKE_CASE__ = classifier.predict(test_image)
# training_set.class_indices
if result[0][0] == 0:
SCREAMING_SNAKE_CASE__ = "Normal"
if result[0][0] == 1:
SCREAMING_SNAKE_CASE__ = "Abnormality detected"
| 393
| 0
|
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
@staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)
def __init__( self , lowerCamelCase_ , *lowerCamelCase_ ) -> Any:
"""simple docstring"""
_UpperCamelCase = accelerate_config_file
def lowercase ( self ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = "not installed"
if is_safetensors_available():
import safetensors
_UpperCamelCase = safetensors.__version__
elif importlib.util.find_spec("safetensors" ) is not None:
import safetensors
_UpperCamelCase = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
_UpperCamelCase = "not installed"
_UpperCamelCase = _UpperCamelCase = "not found"
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
_UpperCamelCase = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_a ):
_UpperCamelCase = load_config_from_file(self._accelerate_config_file ).to_dict()
_UpperCamelCase = (
"\n".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(_a , _a )
else f'''\t{accelerate_config}'''
)
_UpperCamelCase = "not installed"
_UpperCamelCase = "NA"
if is_torch_available():
import torch
_UpperCamelCase = torch.__version__
_UpperCamelCase = torch.cuda.is_available()
_UpperCamelCase = "not installed"
_UpperCamelCase = "NA"
if is_tf_available():
import tensorflow as tf
_UpperCamelCase = tf.__version__
try:
# deprecated in v2.1
_UpperCamelCase = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
_UpperCamelCase = bool(tf.config.list_physical_devices("GPU" ) )
_UpperCamelCase = "not installed"
_UpperCamelCase = "not installed"
_UpperCamelCase = "not installed"
_UpperCamelCase = "NA"
if is_flax_available():
import flax
import jax
import jaxlib
_UpperCamelCase = flax.__version__
_UpperCamelCase = jax.__version__
_UpperCamelCase = jaxlib.__version__
_UpperCamelCase = jax.lib.xla_bridge.get_backend().platform
_UpperCamelCase = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": f'''{safetensors_version}''',
"Accelerate version": f'''{accelerate_version}''',
"Accelerate config": f'''{accelerate_config_str}''',
"PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
"Tensorflow version (GPU?)": f'''{tf_version} ({tf_cuda_available})''',
"Flax version (CPU?/GPU?/TPU?)": f'''{flax_version} ({jax_backend})''',
"Jax version": f'''{jax_version}''',
"JaxLib version": f'''{jaxlib_version}''',
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(_a ) )
return info
@staticmethod
def lowercase ( lowerCamelCase_ ) -> str:
"""simple docstring"""
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
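

# Quick illustrative sanity check of the formatting helper (the command itself
# is normally reached through the `transformers-cli env` entry point):
if __name__ == "__main__":
    print(EnvironmentCommand.format_dict({"Platform": "Linux", "Python version": "3.10"}))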
| 147
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i, i + 1) for i in range(10)]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
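

# Illustrative standalone run (outside pytest): _distribute_shards splits
# `num_shards` shard indices into at most `max_num_jobs` contiguous ranges,
# matching the parametrized expectations above.
if __name__ == "__main__":
    print(_distribute_shards(num_shards=10, max_num_jobs=3))
    # [range(0, 4), range(4, 7), range(7, 10)]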
| 33
| 0
|
'''simple docstring'''
def sum_of_proper_divisors(input_num):
    """
    >>> sum_of_proper_divisors(28)
    28
    """
    if not isinstance(input_num, int):
        raise ValueError('Input must be an integer')
    if input_num <= 0:
        raise ValueError('Input must be positive')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0
    )
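

# Illustrative follow-up (helper name as introduced above): a number equal to
# the sum of its proper divisors is called perfect.
def is_perfect(candidate: int) -> bool:
    return sum_of_proper_divisors(candidate) == candidate
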
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 438
| 0
|
from __future__ import annotations
solution = []


def is_safe(board, row, column):
    """Check that no previously placed queen attacks (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board, row):
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board):
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
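
# Known cross-check (illustrative): the 8-queens puzzle has exactly 92 distinct
# solutions, so the backtracking above should have recorded 92 boards.
assert len(solution) == 92, "unexpected solution count for n = 8"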
| 493
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(
        image, x_coordinate, y_coordinate, center
    )
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 493
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()
        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)
        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 258
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 258
| 1
|
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
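
    # Quick sanity check (illustrative): over every possible success count the
    # binomial probabilities should sum to ~1.
    print(sum(binomial_distribution(k, 4, 0.75) for k in range(5)))  # ~1.0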
| 484
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data):
    # Split the dataset bunch into features and target.
    return (data["data"], data["target"])


def xgboost(features, target):
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
return classifier
def main():
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(
        features, targets, test_size=0.25
    )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
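
    # Illustrative extra metric (using the helpers defined above): a single
    # accuracy score on a fresh split, alongside the confusion matrix from main().
    from sklearn.metrics import accuracy_score

    features, targets = data_handling(load_iris())
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    print("accuracy:", accuracy_score(y_test, xgboost(x_train, y_train).predict(x_test)))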
| 484
| 1
|
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNet2DModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
torch.backends.cuda.matmul.allow_tf32 = False
class TrainingTests(unittest.TestCase):
    def get_model_optimizer(self, resolution=32):
        set_seed(0)
        model = UNet2DModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def test_training_step_equality(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # the two schedulers should add identical noise and yield identical predictions
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 526
|
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertEqual(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 526
| 1
|
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
| 238
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}


class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 238
| 1
|
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
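

# Illustrative classical cross-check (not part of the original file): for
# definite 0/1 inputs (no Hadamard superposition) the dominant measured state
# should match a plain boolean full adder, read as "<carry_out><sum>".
def classical_full_adder(a: int, b: int, c_in: int) -> str:
    total = a + b + c_in
    return f"{total >> 1}{total & 1}"  # high bit = carry out, low bit = sum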
| 708
|
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError('iterations must be defined as integers')
    if not isinstance(number, int) or not number >= 1:
        raise ValueError('starting number must be an integer and be more than 0')
    if not iterations >= 1:
        raise ValueError('Iterations must be done more than 0 times to play FizzBuzz')

    out = ''
    while number <= iterations:
        if number % 3 == 0:
            out += 'Fizz'
        if number % 5 == 0:
            out += 'Buzz'
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += ' '
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
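
    # Illustrative run (function name as fixed above):
    print(fizz_buzz(1, 15))
    # 1 2 Fizz 4 Buzz Fizz 7 8 Fizz Buzz 11 Fizz 13 14 FizzBuzz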
| 421
| 0
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F"""Dataset loaded in {time.time()-t_start:.2f}s""")

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(F"""Dataset tokenized in {time.time()-t_start:.2f}s""")
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"""Data pushed to the hub in {time.time()-t_start:.2f}s""")
| 30
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid,
    source,
    destination,
    allow_diagonal,
) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
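
    # Illustrative run (hypothetical 3x3 grid; 1 = walkable, 0 = blocked):
    demo_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    print(dijkstra(demo_grid, (0, 0), (2, 0), allow_diagonal=False))
    # (6.0, [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)])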
| 560
| 0
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'model.framelevel_feature_extractor.module.{i}.kernel.weight'
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias']

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    downstream_dict = checkpoint['Downstream']

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}')

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()

    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
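
# Illustrative invocation (paths and the base model name are assumptions, not
# values shipped with this script):
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model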
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)
    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ], )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCamelCase = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e", )
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."} )
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."} )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
    task: Optional[str] = field(
        default="summarization", metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"}, )
    max_source_length: Optional[int] = field(
        default=1024, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    max_target_length: Optional[int] = field(
        default=128, metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    val_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        }, )
    test_max_target_length: Optional[int] = field(
        default=142, metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        }, )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."} )
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."} )
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."} )
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."} )
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."} )
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."} )
    ignore_pad_token_for_loss: bool = field(
        default=True, metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."}, )
def handle_metrics(split, metrics, output_dir):
    """
    Log and save metrics.

    Args:
    - split: one of train, val, test
    - metrics: metrics dict
    - output_dir: where to save the metrics
    """
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("Training/evaluation parameters %s" , __snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, )
# use task specific params
    use_task_specific_params(model, data_args.task)
# set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())
    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
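
# Illustrative launch command (model name, data dir and output dir are
# assumptions for demonstration, not values from this repository):
#   python finetune_trainer.py \
#       --model_name_or_path sshleifer/distilbart-cnn-12-6 \
#       --data_dir ./cnn_dm \
#       --output_dir ./output \
#       --do_train --do_eval --predict_with_generate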
def solution(n: int = 4_000_000) -> int:
    """
    Returns the sum of all even Fibonacci numbers that do not exceed n.
    """
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
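
# A minimal alternative sketch (our addition, not part of the original file):
# even Fibonacci numbers are every third term and satisfy
# E(k) = 4 * E(k - 1) + E(k - 2), so they can be generated directly.
def solution_by_recurrence(n: int = 4_000_000) -> int:
    total = 0
    prev, curr = 2, 8  # the first two even Fibonacci numbers
    if n >= 2:
        total += 2
    if n >= 8:
        total += 8
    while (nxt := 4 * curr + prev) <= n:
        total += nxt
        prev, curr = curr, nxt
    return total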
if __name__ == "__main__":
print(F"""{solution() = }""")
def solution(length: int = 50) -> int:
    """
    Returns the number of ways a row of the given length can be filled with
    coloured oblong tiles of length two, three or four (one colour per tile size).
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f"""{solution() = }""")
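    # Sanity check against the worked example in the problem statement: a row of
    # length five admits 7 red, 3 green and 2 blue tilings, 12 in total.
    print(solution(5))  # expected: 12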
"""simple docstring"""
import numpy as np
from PIL import Image
def lowerCAmelCase__ ( _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
"""simple docstring"""
snake_case = np.array(_UpperCamelCase )
if arr.shape[0] != arr.shape[1]:
raise ValueError('The input array is not a square matrix' )
snake_case = 0
snake_case = 0
snake_case = 0
snake_case = 0
# compute the shape of the output matrix
snake_case = (arr.shape[0] - size) // stride + 1
# initialize the output matrix with zeros of shape maxpool_shape
snake_case = np.zeros((maxpool_shape, maxpool_shape) )
while i < arr.shape[0]:
if i + size > arr.shape[0]:
# if the end of the matrix is reached, break
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
        j = 0
        mat_j = 0
return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Slide a ``size`` x ``size`` window over ``arr`` with the given ``stride``
    and keep the (integer) average of each window.
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
while i < arr.shape[0]:
# if the end of the matrix is reached, break
if i + size > arr.shape[0]:
break
while j < arr.shape[1]:
# if the end of the matrix is reached, break
if j + size > arr.shape[1]:
break
# compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
# shift the pooling matrix by stride of column pixels
j += stride
mat_j += 1
# shift the pooling matrix by stride of row pixels
i += stride
mat_i += 1
# reset the column index to 0
        j = 0
        mat_j = 0
return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
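
    # Minimal numeric check (the 4x4 ramp input is an assumed example):
    sample = np.arange(16).reshape(4, 4)
    print(maxpooling(sample, size=2, stride=2))  # [[ 5.  7.] [13. 15.]]
    print(avgpooling(sample, size=2, stride=2))  # [[ 2.  4.] [10. 12.]]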
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
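
# Minimal usage sketch (pairing with ViTMSNModel is our assumption about the
# surrounding library, not something this file defines):
#   from transformers import ViTMSNModel
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   model = ViTMSNModel(config)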
"""simple docstring"""
def a ( __UpperCAmelCase : list[int] ) -> float:
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__magic_name__: Dict = sum(__UpperCAmelCase ) / len(__UpperCAmelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(__UpperCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
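
    # Worked example (values assumed): for [1, 2, 3, 4] the mean is 2.5, so the
    # average absolute deviation is (1.5 + 0.5 + 0.5 + 1.5) / 4 = 1.0.
    print(average_absolute_deviation([1, 2, 3, 4]))  # 1.0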
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''AI-Sweden/gpt-sw3-126m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-350m''': '''https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-1.6b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-6.7b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model''',
'''AI-Sweden/gpt-sw3-20b''': '''https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model''',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''AI-Sweden/gpt-sw3-126m''': 2_048,
'''AI-Sweden/gpt-sw3-350m''': 2_048,
'''AI-Sweden/gpt-sw3-1.6b''': 2_048,
'''AI-Sweden/gpt-sw3-6.7b''': 2_048,
'''AI-Sweden/gpt-sw3-20b''': 2_048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """
    Construct a GPT-SW3 tokenizer, backed by SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, do_lower_case=False, remove_space=False, keep_accents=False, pad_token=None, unk_token=None, eos_token=None, bos_token=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored" )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]" )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        """
        Returns the preprocessed text. This procedure is identical to what was used when training the tokenizer.
        """
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string, this function is overridden to remove the default clean up."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) to a single string. Special tokens remain intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False

        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """
        Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer. This has reduced
        functionality but is often much faster.
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids
    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """
        Decodes a text or batch of texts from token ids using the raw SP tokenizer.
        """
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
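
# Minimal usage sketch (the checkpoint name is taken from the map above; the
# round trip shown is an assumption about typical use, not a verified output):
#   tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#   ids = tokenizer.encode_fast("Svenska är roligt!")
#   text = tokenizer.decode_fast(ids)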
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1, )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
            ], dtype=np.int64, )
        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_utterances, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."], )
    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase__ = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_1_9_7_0, 1_3, 5, 6_0_9_2, 1_6_7, 2_8, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 7_0_2_8, 1_2_0_5_1, 1_8, 1_7, 7_1_0_3, 2_1_5_3, 6_7_3, 8, 3_5_1_5, 1_8_6_8_4, 8, 4_4_6_1, 6, 1_9_2_7, 2_9_7, 8, 1_2_0_6_0, 2_6_0_7, 1_8, 1_3, 5, 4_4_6_1, 1_5, 1_0_5_3_8, 3_8, 8, 1_3_5, 1_5, 8_2_2, 5_8, 1_5, 9_9_3, 1_0_3_6_3, 1_5, 1_4_6_0, 8_0_0_5, 4_4_6_1, 1_5, 9_9_3, 2_5_5, 2_3_2_8, 9, 9, 9, 6, 2_6, 1_1_1_2, 8_1_6, 3_2_6_0, 1_3, 5, 1_0_3, 2_3_7_7, 6, 1_7, 1_1_1_2, 8_1_6, 2_7_8_2, 1_3, 5, 1_0_3, 1_0_6_4_1, 6, 2_9, 8_4, 2_5_1_2, 2_4_3_0, 7_8_2, 1_8_6_8_4, 2_7_6_1, 1_9, 8_0_8, 2_4_3_0, 2_5_5_6, 1_7, 8_5_5, 1_4_8_0, 9_4_7_7, 4_0_9_1, 1_2_8, 1_1_7_1_2, 1_5, 7_1_0_3, 2_1_5_3, 6_7_3, 1_7, 2_4_8_8_3, 9_9_9_0, 9, 3], [2, 1_1_5_0_2, 2_5, 1_0_0_6, 2_0, 7_8_2, 8, 1_1_8_0_9, 8_5_5, 1_7_3_2, 1_9_3_9_3, 1_8_6_6_7, 3_7, 3_6_7, 2_1_0_1_8, 6_9, 1_8_5_4, 3_4, 1_1_8_6_0, 1_9_1_2_4, 2_7, 1_5_6, 2_2_5, 1_7, 1_9_3, 4_1_4_1, 1_9, 6_5, 9_1_2_4, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 1_4, 2_2_3_1, 8_8_6, 2_3_8_5, 1_7_6_5_9, 8_4, 1_4, 1_6_7_9_2, 1_9_5_2, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__, model_name="albert-base-v2", revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e", )
def cocktail_shaker_sort(unsorted: list) -> list:
    """
    Pure implementation of the cocktail shaker sort algorithm in Python.
    """
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase__ = input('Enter numbers separated by a comma:\n').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(',')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 486
| 1
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
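# Minimal usage sketch (assumes the package layout above): the deprecated alias behaves
# exactly like MobileViTImageProcessor but emits a FutureWarning on construction.
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       MobileViTFeatureExtractor()
#       assert any(issubclass(w.category, FutureWarning) for w in caught)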
| 717
|
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Returns the sum of the factorials of the digits of ``number``."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")
    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")
    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Counts the chains below ``number_limit`` with exactly ``chain_length``
    non-repeating terms (Project Euler problem 74)."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")
    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")
    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths: dict[int, int] = {}
    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0
        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)
        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]
        chain_sets_lengths[start_chain_element] = chain_set_length
        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1
    return chains_counter
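# Worked example (Project Euler 74): starting from 169,
#   digit_factorial_sum(169)    == 1! + 6! + 9! == 363601
#   digit_factorial_sum(363601) == 1454
#   digit_factorial_sum(1454)   == 169
# so the chain {169, 363601, 1454} contains exactly three distinct terms before looping.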
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution()}""")
| 582
| 0
|
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Returns the text between ``start_prompt`` and ``end_prompt`` in ``filename``,
    together with the line indices delimiting it and the full list of lines."""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    # Ignore empty lines at the boundaries of the extracted block.
    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def get_model_list_for_task(task_guide):
    """Builds the markdown list of models that support ``task_guide``."""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Checks (and optionally fixes) the autogenerated model list of a task guide."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 100
|
'''simple docstring'''
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "attention_mask"]
    def __init__(self, feature_size=1, sampling_rate=16000, padding_value=0.0, do_normalize=False,
                 num_mel_bins=80, hop_length=16, win_length=64, win_function="hann_window",
                 frame_signal_scale=1.0, fmin=80, fmax=7600, mel_floor=1e-10, reduction_factor=2,
                 return_attention_mask=True, **kwargs):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask

        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)

        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin,
            max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney",
        )

        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers",
                FutureWarning,
            )
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values, attention_mask, padding_value=0.0):
        """Normalizes each (possibly padded) input to zero mean and unit variance."""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
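    # Numeric sketch of the normalization above (assumed shapes): for
    #   input_values = [np.array([1.0, 2.0, 3.0, 0.0])] and attention_mask = [[1, 1, 1, 0]]
    # only the first three samples contribute to the mean/variance, and the padded tail
    # is overwritten with `padding_value` after normalization.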
    def _extract_mel_features(self, one_waveform):
        """Extracts log-mel filterbank features for one waveform array."""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride,
            fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10",
        )
        return log_mel_spec.T
    def __call__(self, audio=None, audio_target=None, padding=False, max_length=None, truncation=False,
                 pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None,
                 sampling_rate=None, **kwargs):
        """Featurizes `audio` into model inputs and/or `audio_target` into log-mel labels."""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of,
                return_attention_mask, return_tensors, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of,
                return_attention_mask, return_tensors, **kwargs)
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio(self, speech, is_target=False, padding=False, max_length=None, truncation=False,
                       pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None, **kwargs):
        """Converts raw speech (or target speech) into a padded BatchFeature."""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)

        # always return batch
        if not is_batched:
            speech = [speech]

        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size

        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})

        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs,
        )

        self.feature_size = feature_size_hack

        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)

        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value)

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
    def to_dict(self):
        """Serializes the feature extractor configuration to a dictionary."""
        output = super().to_dict()

        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]

        return output
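# Usage sketch (assumption: 16 kHz mono input):
#   import numpy as np
#   extractor = SpeechT5FeatureExtractor()
#   waveform = np.zeros(16000, dtype=np.float32)  # one second of silence
#   inputs = extractor(audio=waveform, sampling_rate=16000, return_tensors="np")
#   targets = extractor(audio_target=waveform, sampling_rate=16000)  # log-mel "input_values"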
| 370
| 0
|
def hamming_distance(string1: str, string2: str) -> int:
    """Returns the number of positions at which two equal-length strings differ.

    >>> hamming_distance("python", "pytnon")
    1
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 70
|
def odd_even_transposition(arr: list) -> list:
    """Sequential emulation of odd-even transposition (brick) sort.

    >>> odd_even_transposition([5, 4, 3, 2, 1])
    [1, 2, 3, 4, 5]
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        # Alternate between even-indexed and odd-indexed neighbour pairs.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
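# Phase-by-phase sketch of the sequential emulation above, for [3, 2, 1]:
#   even phase (compare indices 0-1): [2, 3, 1]
#   odd phase  (compare indices 1-2): [2, 1, 3]
#   even phase (compare indices 0-1): [1, 2, 3]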
| 70
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiates an EncoderDecoderConfig from an encoder and a decoder configuration."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serializes this instance, including the nested encoder and decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
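# Usage sketch (assumes `transformers` is installed):
#   from transformers import BertConfig
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention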
| 446
|
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = "https://api.github.com"

# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + "/user"

# https://github.com/settings/tokens
USER_TOKEN = os.environ.get("USER_TOKEN", "")


def fetch_github_info(auth_token: str) -> dict[Any, Any]:
    """Fetches information about the authenticated GitHub user."""
    headers = {
        "Authorization": f"token {auth_token}",
        "Accept": "application/vnd.github.v3+json",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT, headers=headers).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
| 214
| 0
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
"""<unk>""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""unwanted""",
"""wa""",
"""un""",
"""running""",
""",""",
"""low""",
"""l""",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"])
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
"""Hello""",
"""(""",
"""bracket""",
""")""",
"""and""",
"""side""",
"""@-@""",
"""scrolled""",
"""[""",
"""and""",
"""]""",
"""Henry""",
"""'s""",
"""$""",
"""5""",
"""@,@""",
"""000""",
"""with""",
"""3""",
"""@.@""",
"""34""",
"""m""",
""".""",
"""What""",
"""'s""",
"""up""",
"""!""",
"""?""",
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 196
|
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
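# Illustration of the magic-number check above: gzip streams start with the two bytes
# 0x1F 0x8B, so GzipExtractor (defined below) only needs to read two bytes from the start
# of a file to recognise the format.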
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        """Filters out tar members whose resolved paths would escape ``output_path``."""

        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
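    # Note (restating the comment above): format inference probes these entries in
    # insertion order, so formats that can be mistaken for one another should be ordered
    # from most to least specific.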
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 196
| 1
|
def binary_recursive(decimal: int) -> str:
    """Converts a non-negative integer to its binary digits, recursively.

    >>> binary_recursive(11)
    '1011'
    """
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    """Takes an integer (as a string), validates it, and returns its binary form.

    >>> main("-11")
    '-0b1011'
    """
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
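# Recursion trace for binary_recursive(11): divmod(11, 2) == (5, 1), divmod(5, 2) == (2, 1),
# divmod(2, 2) == (1, 0), and binary_recursive(1) == "1", so the calls unwind to "1011";
# main("-11") therefore returns "-0b1011".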
if __name__ == "__main__":
from doctest import testmod
testmod()
| 113
|
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 113
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048,
                 encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16,
                 encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True,
                 activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0,
                 activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False,
                 pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
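# Usage sketch: the defaults above mirror the facebook/blenderbot_small-90M checkpoint;
# individual hyper-parameters can be overridden by keyword, e.g.
#   config = BlenderbotSmallConfig(encoder_layers=2, decoder_layers=2)  # a toy-sized model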
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs
    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 432
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Handles arguments for zero-shot classification by expanding each candidate label
    into a premise/hypothesis pair for an NLI model."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
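    # Illustration (assumed label mapping): with
    #   self.model.config.label2id == {"contradiction": 0, "neutral": 1, "entailment": 2}
    # the property above returns 2; if no label starts with "entail" it falls back to -1,
    # which triggers the warning in __init__.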
    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True,
                            truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        """Parses arguments and tokenizes premise/hypothesis pairs."""
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
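# Usage sketch (assumes an NLI checkpoint such as "facebook/bart-large-mnli"):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
#   classifier("I love this keyboard", candidate_labels=["electronics", "cooking"])
#   # -> {"sequence": ..., "labels": [...], "scores": [...]}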
| 432
| 1
|
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 443
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase_ ( A ):
__lowerCamelCase = (DDIMParallelScheduler,)
__lowerCamelCase = (("eta", 0.0), ("num_inference_steps", 5_0))
def _snake_case ( self , **__A ) -> List[str]:
SCREAMING_SNAKE_CASE_ : Optional[Any] ={
'''num_train_timesteps''': 1_000,
'''beta_start''': 0.0_001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**__A )
return config
def _snake_case ( self , **__A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : int =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : str =self.get_scheduler_config(**__A )
SCREAMING_SNAKE_CASE_ : List[Any] =scheduler_class(**__A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] =10, 0.0
SCREAMING_SNAKE_CASE_ : List[str] =self.dummy_model()
SCREAMING_SNAKE_CASE_ : Union[str, Any] =self.dummy_sample_deter
scheduler.set_timesteps(__A )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE_ : Optional[int] =model(__A , __A )
SCREAMING_SNAKE_CASE_ : List[Any] =scheduler.step(__A , __A , __A , __A ).prev_sample
return sample
def _snake_case ( self ) -> Optional[Any]:
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=__A )
def _snake_case ( self ) -> Optional[int]:
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__A )
SCREAMING_SNAKE_CASE_ : Dict =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE_ : Dict =scheduler_class(**__A )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
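        # How "leading" spacing with steps_offset=1 is assumed to produce the
        # values asserted above: step_ratio = num_train_timesteps // num_inference_steps
        # = 1000 // 5 = 200, so
        #     (np.arange(5) * 200)[::-1] + 1  ->  array([801, 601, 401, 201, 1])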
def _snake_case ( self ) -> Dict:
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=__A , beta_end=__A )
def _snake_case ( self ) -> str:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__A )
def _snake_case ( self ) -> List[str]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def _snake_case ( self ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def _snake_case ( self ) -> int:
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=__A )
def _snake_case ( self ) -> Tuple:
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=__A )
def _snake_case ( self ) -> List[Any]:
self.check_over_configs(thresholding=__A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=__A , prediction_type=__A , sample_max_value=__A , )
def _snake_case ( self ) -> Dict:
for t in [1, 10, 49]:
self.check_over_forward(time_step=__A )
def _snake_case ( self ) -> Dict:
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=__A , num_inference_steps=__A )
def _snake_case ( self ) -> int:
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=__A , eta=__A )
def _snake_case ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE_ : List[Any] =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[str] =self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : List[str] =scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14_771 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32_460 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00_979 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def _snake_case ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : List[str] =self.scheduler_classes[0]
SCREAMING_SNAKE_CASE_ : List[str] =self.get_scheduler_config()
SCREAMING_SNAKE_CASE_ : str =scheduler_class(**__A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] =10, 0.0
scheduler.set_timesteps(__A )
SCREAMING_SNAKE_CASE_ : List[str] =self.dummy_model()
SCREAMING_SNAKE_CASE_ : str =self.dummy_sample_deter
SCREAMING_SNAKE_CASE_ : int =self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE_ : Tuple =self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE_ : Union[str, Any] =samplea.shape[0]
SCREAMING_SNAKE_CASE_ : List[str] =torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE_ : Any =torch.arange(__A )[0:3, None].repeat(1 , __A )
SCREAMING_SNAKE_CASE_ : Dict =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE_ : Optional[int] =scheduler.batch_step_no_noise(__A , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , __A )
SCREAMING_SNAKE_CASE_ : str =torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE_ : int =torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 1_147.7_904 ) < 1e-2
assert abs(result_mean.item() - 0.4_982 ) < 1e-3
def _snake_case ( self ) -> Any:
SCREAMING_SNAKE_CASE_ : List[str] =self.full_loop()
SCREAMING_SNAKE_CASE_ : List[Any] =torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] =torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 172.0_067 ) < 1e-2
assert abs(result_mean.item() - 0.223_967 ) < 1e-3
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : str =self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE_ : List[Any] =torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE_ : str =torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 52.5_302 ) < 1e-2
assert abs(result_mean.item() - 0.0_684 ) < 1e-3
def _snake_case ( self ) -> Dict:
# We specify different beta, so that the first alpha is 0.99
SCREAMING_SNAKE_CASE_ : Tuple =self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ : Tuple =torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE_ : int =torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 149.8_295 ) < 1e-2
assert abs(result_mean.item() - 0.1_951 ) < 1e-3
def _snake_case ( self ) -> Optional[Any]:
# We specify different beta, so that the first alpha is 0.99
SCREAMING_SNAKE_CASE_ : List[str] =self.full_loop(set_alpha_to_one=__A , beta_start=0.01 )
SCREAMING_SNAKE_CASE_ : Optional[int] =torch.sum(torch.abs(__A ) )
SCREAMING_SNAKE_CASE_ : Tuple =torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 149.0_784 ) < 1e-2
assert abs(result_mean.item() - 0.1_941 ) < 1e-3
| 443
| 1
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
A : Union[str, Any] = logging.getLogger(__name__)
def lowercase_ ( lowercase__ , lowercase__ ) ->Union[str, Any]:
if os.path.exists(_lowerCamelCase ):
if os.path.exists(os.path.join(_lowerCamelCase , 'config.json' ) ) and os.path.isfile(
os.path.join(_lowerCamelCase , 'config.json' ) ):
os.remove(os.path.join(_lowerCamelCase , 'config.json' ) )
if os.path.exists(os.path.join(_lowerCamelCase , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(_lowerCamelCase , 'pytorch_model.bin' ) ):
os.remove(os.path.join(_lowerCamelCase , 'pytorch_model.bin' ) )
else:
os.makedirs(_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
def lowercase_ ( lowercase__ , lowercase__=False ) ->Any:
_snake_case: str = 2
if unlogit:
_snake_case: Tuple = torch.pow(_lowerCamelCase , _lowerCamelCase )
_snake_case: Optional[int] = p * torch.log(_lowerCamelCase )
_snake_case: Optional[int] = 0
return -plogp.sum(dim=-1 )
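# Sanity check for the entropy helper above, shown with de-obfuscated names
# (`p` and `plogp` are assumed readable aliases for the code as written):
#     >>> p = torch.full((4,), 0.25)
#     >>> -(p * torch.log(p)).sum(dim=-1)
#     tensor(1.3863)   # == ln(4), the entropy of a uniform 4-way distribution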
def lowercase_ ( lowercase__ ) ->Union[str, Any]:
logger.info('lv, h >\t' + '\t'.join(F'''{x + 1}''' for x in range(len(_lowerCamelCase ) ) ) )
for row in range(len(_lowerCamelCase ) ):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:.5f}''' for x in tensor[row].cpu().data ) )
else:
logger.info(F'''layer {row + 1}:\t''' + '\t'.join(F'''{x:d}''' for x in tensor[row].cpu().data ) )
def lowercase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__=True , lowercase__=True , lowercase__=None , lowercase__=False ) ->int:
_snake_case: List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
_snake_case: Optional[Any] = torch.zeros(_lowerCamelCase , _lowerCamelCase ).to(args.device )
_snake_case: Union[str, Any] = torch.zeros(_lowerCamelCase , _lowerCamelCase ).to(args.device )
if head_mask is None:
_snake_case: str = torch.ones(_lowerCamelCase , _lowerCamelCase ).to(args.device )
head_mask.requires_grad_(requires_grad=_lowerCamelCase )
    # If the attention heads were actually pruned, set the head mask to None to avoid a shape mismatch
if actually_pruned:
_snake_case: Optional[Any] = None
_snake_case: Dict = 0.0
_snake_case: Optional[Any] = 0.0
for step, inputs in enumerate(tqdm(_lowerCamelCase , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
_snake_case: int = tuple(t.to(args.device ) for t in inputs )
(_snake_case ): Union[str, Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
_snake_case: Tuple = model(_lowerCamelCase , labels=_lowerCamelCase , head_mask=_lowerCamelCase )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
_snake_case: Optional[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_lowerCamelCase ):
_snake_case: Tuple = entropy(attn.detach() , _lowerCamelCase )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_lowerCamelCase ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
_snake_case: Optional[int] = 2
_snake_case: Dict = torch.pow(torch.pow(_lowerCamelCase , _lowerCamelCase ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
if not args.dont_normalize_global_importance:
_snake_case: Optional[Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(_lowerCamelCase )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(_lowerCamelCase )
logger.info('Head ranked by importance scores' )
_snake_case: Any = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
_snake_case: int = torch.arange(
head_importance.numel() , device=args.device )
_snake_case: int = head_ranks.view_as(_lowerCamelCase )
print_ad_tensor(_lowerCamelCase )
return attn_entropy, head_importance, total_loss
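# --- Minimal sketch of the importance score accumulated above (an assumption
# spelled out here, not part of the original script): it is the accumulated
# |dL/dmask| at mask == 1, i.e. a first-order estimate of how much the loss
# changes when a head is switched off.
def _head_importance_sketch(loss_fn, n_layers, n_heads):
    mask = torch.ones(n_layers, n_heads, requires_grad=True)
    loss_fn(mask).backward()  # forward + backward populates mask.grad
    return mask.grad.abs()    # per-head importance estimate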
def lowercase_ ( lowercase__ , lowercase__ , lowercase__ ) ->str:
_snake_case: Tuple = compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase )
    _snake_case: int = 1 / loss # instead of downstream score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , _lowerCamelCase , original_score * args.masking_threshold )
_snake_case: int = torch.ones_like(_lowerCamelCase )
_snake_case: List[str] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case: Dict = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case: List[str] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case: Optional[int] = float('Inf' )
_snake_case: Optional[Any] = head_importance.view(-1 ).sort()[1]
if len(_lowerCamelCase ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case: Tuple = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case: int = new_head_mask.view(-1 )
_snake_case: Optional[Any] = 0.0
_snake_case: Optional[Any] = new_head_mask.view_as(_lowerCamelCase )
_snake_case: Any = new_head_mask.clone().detach()
print_ad_tensor(_lowerCamelCase )
# Compute metric and head importance again
_snake_case: Union[str, Any] = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , head_mask=_lowerCamelCase )
_snake_case: Optional[Any] = 1 / loss
logger.info(
        'Masking: current score: %f, remaining heads %d (%.1f percent)' , _lowerCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(_lowerCamelCase )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
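# --- Condensed sketch of the masking loop above (assumptions: `score(mask)`
# re-evaluates the model under a head mask and higher is better; `importance`
# is a per-head tensor). Least-important heads are zeroed in batches until the
# score drops below threshold * original, and the last passing mask is returned.
def _mask_until_threshold_sketch(importance, score, threshold=0.9, amount=0.1):
    mask = torch.ones_like(importance)
    original = score(mask)
    num_to_mask = max(1, int(mask.numel() * amount))
    best = mask.clone()
    while score(mask) >= threshold * original:
        best = mask.clone()                        # last mask meeting the threshold
        imp = importance.view(-1).clone()
        imp[mask.view(-1) == 0] = float("inf")     # never re-select a masked head
        mask.view(-1)[imp.sort()[1][:num_to_mask]] = 0.0
    return best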
def lowercase_ ( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) ->str:
_snake_case: int = datetime.now()
_snake_case: List[str] = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , compute_importance=_lowerCamelCase , head_mask=_lowerCamelCase )
_snake_case: int = 1 / loss
_snake_case: Optional[Any] = datetime.now() - before_time
_snake_case: Tuple = sum(p.numel() for p in model.parameters() )
_snake_case: Any = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_lowerCamelCase ) )
}
for k, v in heads_to_prune.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_snake_case: Optional[Any] = [
v,
]
assert sum(len(_lowerCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_lowerCamelCase )
_snake_case: Union[str, Any] = sum(p.numel() for p in model.parameters() )
_snake_case: Dict = datetime.now()
_snake_case: List[Any] = compute_heads_importance(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , compute_entropy=_lowerCamelCase , compute_importance=_lowerCamelCase , head_mask=_lowerCamelCase , actually_pruned=_lowerCamelCase , )
_snake_case: Optional[int] = 1 / loss
_snake_case: Optional[int] = datetime.now() - before_time
logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , _lowerCamelCase , _lowerCamelCase , pruned_num_params / original_num_params * 100 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , _lowerCamelCase , _lowerCamelCase )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 100 )
save_model(_lowerCamelCase , args.output_dir )
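# --- Usage sketch for the mask -> prune conversion above. `model.prune_heads`
# is the standard Hugging Face PreTrainedModel API; the tensor is illustrative:
#     head_mask = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 1.0]])
#     heads_to_prune = {layer: (1 - head_mask[layer].long()).nonzero().squeeze(-1).tolist()
#                       for layer in range(head_mask.size(0))}   # -> {0: [1], 1: [0]}
#     model.prune_heads(heads_to_prune)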
def lowercase_ ( ) ->Dict:
_snake_case: Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=_lowerCamelCase , type=_lowerCamelCase , required=_lowerCamelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=_lowerCamelCase , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=_lowerCamelCase , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=_lowerCamelCase , type=_lowerCamelCase , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=_lowerCamelCase , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try masking heads until the score drops below a threshold of the original metric.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=_lowerCamelCase , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
        '--masking_amount' , default=0.1 , type=_lowerCamelCase , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=_lowerCamelCase , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=_lowerCamelCase , help=(
            'The maximum total input sequence length after tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=_lowerCamelCase , help='Batch size.' )
parser.add_argument('--seed' , type=_lowerCamelCase , default=42 )
parser.add_argument('--local_rank' , type=_lowerCamelCase , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=_lowerCamelCase , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=_lowerCamelCase , default='' , help='Can be used for distant debugging.' )
_snake_case: Dict = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_lowerCamelCase )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case: List[str] = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case: List[Any] = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case: Optional[Any] = torch.device('cuda' , args.local_rank )
_snake_case: Optional[int] = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case: List[str] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case: List[Any] = nn.parallel.DistributedDataParallel(
_lowerCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_lowerCamelCase )
elif args.n_gpu > 1:
_snake_case: Union[str, Any] = nn.DataParallel(_lowerCamelCase )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_lowerCamelCase )
torch.save(_lowerCamelCase , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , _lowerCamelCase )
# Prepare dataset
_snake_case: Union[str, Any] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
_snake_case: str = (torch.from_numpy(_lowerCamelCase ),)
_snake_case: int = TensorDataset(*_lowerCamelCase )
_snake_case: Union[str, Any] = RandomSampler(_lowerCamelCase )
_snake_case: List[str] = DataLoader(_lowerCamelCase , sampler=_lowerCamelCase , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case: int = mask_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
prune_heads(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main()
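# Example invocation (script and file names are placeholders; --data_dir points
# at a text file of token ids, since it is read with np.loadtxt above):
#     python bertology_gpt2.py --data_dir ./tokens.txt --model_name_or_path gpt2 \
#         --output_dir ./out --try_masking --masking_threshold 0.9 --masking_amount 0.1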
| 706
|
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class lowerCamelCase :
def __init__( self : Optional[int] , __snake_case : int , __snake_case : Tuple=13 , __snake_case : Any=7 , __snake_case : Union[str, Any]=True , __snake_case : List[str]=True , __snake_case : Optional[int]=False , __snake_case : List[Any]=True , __snake_case : str=99 , __snake_case : Optional[int]=32 , __snake_case : Any=5 , __snake_case : Tuple=4 , __snake_case : List[Any]=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : int=0.1 , __snake_case : Any=5_12 , __snake_case : Dict=16 , __snake_case : int=2 , __snake_case : Any=0.02 , __snake_case : Any=3 , __snake_case : str=4 , __snake_case : int=None , ):
'''simple docstring'''
_snake_case: Dict = parent
_snake_case: Optional[int] = batch_size
_snake_case: List[Any] = seq_length
_snake_case: Union[str, Any] = is_training
_snake_case: Optional[Any] = use_input_mask
_snake_case: Dict = use_token_type_ids
_snake_case: Any = use_labels
_snake_case: Optional[Any] = vocab_size
_snake_case: List[Any] = hidden_size
_snake_case: int = num_hidden_layers
_snake_case: List[str] = num_attention_heads
_snake_case: List[Any] = intermediate_size
_snake_case: Optional[Any] = hidden_act
_snake_case: str = hidden_dropout_prob
_snake_case: List[str] = attention_probs_dropout_prob
_snake_case: Dict = max_position_embeddings
_snake_case: Optional[Any] = type_vocab_size
_snake_case: List[Any] = type_sequence_label_size
_snake_case: List[str] = initializer_range
_snake_case: List[str] = num_labels
_snake_case: Tuple = num_choices
_snake_case: Dict = scope
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
_snake_case: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case: int = None
if self.use_input_mask:
_snake_case: List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case: List[Any] = None
if self.use_token_type_ids:
_snake_case: str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case: Optional[int] = None
_snake_case: Tuple = None
_snake_case: Union[str, Any] = None
if self.use_labels:
_snake_case: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case: Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_snake_case: Union[str, Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Any , __snake_case : Any , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : int ):
'''simple docstring'''
_snake_case: Optional[Any] = LlamaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Optional[Any] = model(__snake_case , attention_mask=__snake_case )
_snake_case: Tuple = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Tuple , __snake_case : Dict , __snake_case : str , __snake_case : int , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Any , ):
'''simple docstring'''
_snake_case: str = True
_snake_case: int = LlamaModel(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: int = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , )
_snake_case: Optional[int] = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , )
_snake_case: List[str] = model(__snake_case , attention_mask=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , __snake_case : List[str] , __snake_case : Dict , __snake_case : Dict , __snake_case : List[str] , __snake_case : Optional[int] , __snake_case : int , __snake_case : int , __snake_case : List[Any] , __snake_case : Union[str, Any] , ):
'''simple docstring'''
_snake_case: Union[str, Any] = LlamaForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: str = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , __snake_case : Dict , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : Optional[int] , __snake_case : Any , ):
'''simple docstring'''
_snake_case: Any = True
_snake_case: Optional[int] = True
_snake_case: List[str] = LlamaForCausalLM(config=__snake_case )
model.to(__snake_case )
model.eval()
# first forward pass
_snake_case: Dict = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , use_cache=__snake_case , )
_snake_case: Optional[int] = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids
_snake_case: Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_snake_case: str = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append next tokens to input_ids and the attention mask
_snake_case: Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
_snake_case: Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
_snake_case: Tuple = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , output_hidden_states=__snake_case , )['hidden_states'][0]
_snake_case: Dict = model(
__snake_case , attention_mask=__snake_case , encoder_hidden_states=__snake_case , encoder_attention_mask=__snake_case , past_key_values=__snake_case , output_hidden_states=__snake_case , )['hidden_states'][0]
# select random slice
_snake_case: List[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_snake_case: str = output_from_no_past[:, -3:, random_slice_idx].detach()
_snake_case: Tuple = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-3 ) )
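    # --- The cache-consistency idea above, restated with placeholder names
    # (any Hugging Face causal LM; shown on logits rather than hidden states):
    #     out = model(ids, use_cache=True)
    #     full = model(torch.cat([ids, next_ids], dim=-1)).logits[:, -next_ids.shape[1]:]
    #     incr = model(next_ids, past_key_values=out.past_key_values).logits
    #     torch.testing.assert_close(full, incr, rtol=0, atol=1e-3)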
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
_snake_case: List[str] = self.prepare_config_and_inputs()
(
(
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) , (
_snake_case
) ,
): Dict = config_and_inputs
_snake_case: Any = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (LlamaForCausalLM,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE = (
{
"feature-extraction": LlamaModel,
"text-classification": LlamaForSequenceClassification,
"text-generation": LlamaForCausalLM,
"zero-shot": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: Optional[int] = LlamaModelTester(self )
_snake_case: Dict = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case: Union[str, Any] = type
self.model_tester.create_and_check_model(*__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case: int = 3
_snake_case: Optional[Any] = input_dict['input_ids']
_snake_case: Tuple = input_ids.ne(1 ).to(__snake_case )
_snake_case: Optional[Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case: Union[str, Any] = LlamaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: str = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case: Dict = 3
_snake_case: str = 'single_label_classification'
_snake_case: List[str] = input_dict['input_ids']
_snake_case: Optional[int] = input_ids.ne(1 ).to(__snake_case )
_snake_case: List[str] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
_snake_case: Optional[Any] = LlamaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Optional[Any] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case: int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case: Any = 3
_snake_case: Optional[int] = 'multi_label_classification'
_snake_case: Tuple = input_dict['input_ids']
_snake_case: Optional[Any] = input_ids.ne(1 ).to(__snake_case )
_snake_case: List[str] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
_snake_case: Union[str, Any] = LlamaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
_snake_case: Optional[int] = model(__snake_case , attention_mask=__snake_case , labels=__snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('LLaMA buffers include complex numbers, which breaks this test' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
pass
@parameterized.expand([('linear',), ('dynamic',)] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , __snake_case : Union[str, Any] ):
'''simple docstring'''
_snake_case , _snake_case: int = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case: Optional[int] = ids_tensor([1, 10] , config.vocab_size )
_snake_case: Union[str, Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_snake_case: Tuple = LlamaModel(__snake_case )
original_model.to(__snake_case )
original_model.eval()
_snake_case: List[Any] = original_model(__snake_case ).last_hidden_state
_snake_case: List[str] = original_model(__snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
_snake_case: Tuple = {'type': scaling_type, 'factor': 10.0}
_snake_case: List[Any] = LlamaModel(__snake_case )
scaled_model.to(__snake_case )
scaled_model.eval()
_snake_case: Dict = scaled_model(__snake_case ).last_hidden_state
_snake_case: str = scaled_model(__snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__snake_case , __snake_case , atol=1e-5 ) )
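# --- Sketch of what the "linear" RoPE scaling exercised above is assumed to do:
# rotary position indices are divided by the scaling factor before the cos/sin
# caches are built, so a model trained on N positions can address factor * N:
#     t = torch.arange(seq_len) / scaling_factor
#     freqs = torch.outer(t, inv_freq)   # then cos/sin as usual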
@require_torch
class lowerCamelCase ( unittest.TestCase ):
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: List[str] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case: Optional[int] = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
_snake_case: Optional[int] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
_snake_case: List[Any] = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case: Tuple = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case: Union[str, Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case: Any = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
_snake_case: int = model(torch.tensor(__snake_case ) )
# Expected mean on dim = -1
_snake_case: List[Any] = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case: Optional[Any] = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Logits are not exactly the same, once we fix the instabilities somehow, will update!' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
_snake_case: List[Any] = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case: Any = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
_snake_case: List[str] = model(torch.tensor(__snake_case ) )
# Expected mean on dim = -1
_snake_case: List[Any] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
_snake_case: Optional[int] = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
        'Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
_snake_case: int = [1, 3_06, 46_58, 2_78, 65_93, 3_10, 28_34, 3_38]
_snake_case: Any = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
_snake_case: Dict = model(torch.tensor(__snake_case ) )
_snake_case: Optional[Any] = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , __snake_case , atol=1e-2 , rtol=1e-2 )
# fmt: off
_snake_case: str = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , __snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('Model is currently gated' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
_snake_case: Dict = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
_snake_case: Dict = 'Simply put, the theory of relativity states that '
_snake_case: List[str] = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
_snake_case: Optional[Any] = tokenizer.encode(__snake_case , return_tensors='pt' )
_snake_case: Optional[int] = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=__snake_case )
# greedy generation outputs
_snake_case: List[Any] = model.generate(__snake_case , max_new_tokens=64 , top_p=__snake_case , temperature=1 , do_sample=__snake_case )
_snake_case: Dict = tokenizer.decode(generated_ids[0] , skip_special_tokens=__snake_case )
self.assertEqual(__snake_case , __snake_case )
| 273
| 0
|
"""simple docstring"""
import argparse
A = '''docs/source/_static/js/custom.js'''
def __A ( a_ :Tuple) -> Union[str, Any]:
with open(a_ , encoding='''utf-8''' , newline='''\n''') as f:
__a : Union[str, Any] = f.readlines()
__a : List[Any] = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion ='''):
index += 1
__a : Optional[Any] = F"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {'''):
index += 1
# We go until the end
while not lines[index].startswith('''}'''):
index += 1
# We add the new version at the end
lines[index - 1] += F""" \"v{version}\": \"v{version}\",\n"""
with open(a_ , '''w''' , encoding='''utf-8''' , newline='''\n''') as f:
f.writelines(a_)
if __name__ == "__main__":
A = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
A = parser.parse_args()
update_custom_js(args.version)
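# For reference, the custom.js fragment this script rewrites looks roughly like
# the following (an assumption based on the parsing logic above):
#     const stableVersion = "v4.30.0"
#     const versionMapping = {
#         "": "main",
#         "v4.30.0": "v4.30.0",
#     }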
| 52
|
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self :Optional[int]):
"""simple docstring"""
torch.manual_seed(0)
_lowercase =UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'), )
return model
def UpperCamelCase__ ( self :str):
"""simple docstring"""
_lowercase =self.dummy_uncond_unet
_lowercase =ScoreSdeVeScheduler()
_lowercase =ScoreSdeVePipeline(unet=snake_case, scheduler=snake_case)
sde_ve.to(snake_case)
sde_ve.set_progress_bar_config(disable=snake_case)
_lowercase =torch.manual_seed(0)
_lowercase =sde_ve(num_inference_steps=2, output_type='numpy', generator=snake_case).images
_lowercase =torch.manual_seed(0)
_lowercase =sde_ve(num_inference_steps=2, output_type='numpy', generator=snake_case, return_dict=snake_case)[
0
]
_lowercase =image[0, -3:, -3:, -1]
_lowercase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_lowercase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self :int):
"""simple docstring"""
_lowercase ='google/ncsnpp-church-256'
_lowercase =UNetaDModel.from_pretrained(snake_case)
_lowercase =ScoreSdeVeScheduler.from_pretrained(snake_case)
_lowercase =ScoreSdeVePipeline(unet=snake_case, scheduler=snake_case)
sde_ve.to(snake_case)
sde_ve.set_progress_bar_config(disable=snake_case)
_lowercase =torch.manual_seed(0)
_lowercase =sde_ve(num_inference_steps=10, output_type='numpy', generator=snake_case).images
_lowercase =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowercase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 181
| 0
|
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Union[str, Any]:
if label_map is not None:
for old_id, new_id in label_map.items():
UpperCamelCase__ : Optional[Any] = new_id
# turn into Numpy arrays
UpperCamelCase__ : str = np.array(lowerCamelCase_)
UpperCamelCase__ : Dict = np.array(lowerCamelCase_)
if reduce_labels:
UpperCamelCase__ : List[str] = 255
UpperCamelCase__ : Union[str, Any] = label - 1
UpperCamelCase__ : Optional[Any] = 255
UpperCamelCase__ : Dict = label != ignore_index
UpperCamelCase__ : List[Any] = np.not_equal(lowerCamelCase_ , lowerCamelCase_)
UpperCamelCase__ : int = pred_label[mask]
UpperCamelCase__ : Optional[Any] = np.array(lowerCamelCase_)[mask]
UpperCamelCase__ : Union[str, Any] = pred_label[pred_label == label]
UpperCamelCase__ : int = np.histogram(lowerCamelCase_ , bins=lowerCamelCase_ , range=(0, num_labels - 1))[0]
UpperCamelCase__ : Optional[int] = np.histogram(lowerCamelCase_ , bins=lowerCamelCase_ , range=(0, num_labels - 1))[0]
UpperCamelCase__ : Optional[int] = np.histogram(lowerCamelCase_ , bins=lowerCamelCase_ , range=(0, num_labels - 1))[0]
UpperCamelCase__ : int = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> List[Any]:
UpperCamelCase__ : Optional[int] = np.zeros((num_labels,) , dtype=np.floataa)
UpperCamelCase__ : Dict = np.zeros((num_labels,) , dtype=np.floataa)
UpperCamelCase__ : int = np.zeros((num_labels,) , dtype=np.floataa)
UpperCamelCase__ : Tuple = np.zeros((num_labels,) , dtype=np.floataa)
for result, gt_seg_map in zip(lowerCamelCase_ , lowerCamelCase_):
UpperCamelCase__ : Tuple = intersect_and_union(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def __UpperCAmelCase ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , lowerCamelCase_ = False , ) -> Optional[Any]:
UpperCamelCase__ : Dict = total_intersect_and_union(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
# compute metrics
UpperCamelCase__ : Optional[int] = {}
UpperCamelCase__ : List[Any] = total_area_intersect.sum() / total_area_label.sum()
UpperCamelCase__ : Tuple = total_area_intersect / total_area_union
UpperCamelCase__ : str = total_area_intersect / total_area_label
UpperCamelCase__ : Union[str, Any] = np.nanmean(lowerCamelCase_)
UpperCamelCase__ : List[Any] = np.nanmean(lowerCamelCase_)
UpperCamelCase__ : Any = all_acc
UpperCamelCase__ : Optional[int] = iou
UpperCamelCase__ : Union[str, Any] = acc
if nan_to_num is not None:
UpperCamelCase__ : Optional[int] = {metric: np.nan_to_num(lowerCamelCase_ , nan=lowerCamelCase_) for metric, metric_value in metrics.items()}
return metrics
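# --- Tiny worked example of the histogram bookkeeping above (assumptions: two
# labels, no ignore_index handling, no label remapping); not used by the metric:
def _iou_worked_example():
    pred = np.array([0, 0, 1, 1])
    gt = np.array([0, 1, 1, 1])
    area_intersect = np.histogram(pred[pred == gt], bins=2, range=(0, 1))[0]  # [1, 2]
    area_union = (
        np.histogram(pred, bins=2, range=(0, 1))[0]
        + np.histogram(gt, bins=2, range=(0, 1))[0]
        - area_intersect
    )  # [2, 3]
    return area_intersect / area_union  # per-category IoU -> [0.5, 0.6667]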
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase (datasets.Metric ):
def __UpperCamelCase ( self : Optional[int]):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16'))),
}) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ):
UpperCamelCase__ : Tuple = mean_iou(
results=UpperCAmelCase_ , gt_seg_maps=UpperCAmelCase_ , num_labels=UpperCAmelCase_ , ignore_index=UpperCAmelCase_ , nan_to_num=UpperCAmelCase_ , label_map=UpperCAmelCase_ , reduce_labels=UpperCAmelCase_ , )
return iou_result
| 705
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[int]=13 , UpperCAmelCase_ : Tuple=30 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : List[str]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=37 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Dict=10 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : Any=0.6 , UpperCAmelCase_ : Dict=None , ):
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : List[str] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[Any] = patch_size
UpperCamelCase__ : List[str] = num_channels
UpperCamelCase__ : Union[str, Any] = is_training
UpperCamelCase__ : int = use_labels
UpperCamelCase__ : Optional[int] = hidden_size
UpperCamelCase__ : Any = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : str = intermediate_size
UpperCamelCase__ : Union[str, Any] = hidden_act
UpperCamelCase__ : Optional[int] = hidden_dropout_prob
UpperCamelCase__ : Tuple = attention_probs_dropout_prob
UpperCamelCase__ : Any = type_sequence_label_size
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Optional[int] = mask_ratio
UpperCamelCase__ : int = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded up
# (we add 1 for the [CLS] token)
UpperCamelCase__ : str = (image_size // patch_size) ** 2
UpperCamelCase__ : Dict = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
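        # Worked example of the formula above with this tester's defaults:
        # image_size=30, patch_size=2 -> num_patches = (30 // 2) ** 2 = 225, and with
        # mask_ratio=0.6 the encoder keeps ceil(0.4 * 226) = 91 tokens ([CLS] included).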
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
UpperCamelCase__ : List[str] = None
if self.use_labels:
UpperCamelCase__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self : List[Any]):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __UpperCamelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int]):
UpperCamelCase__ : Dict = ViTMAEModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __UpperCamelCase ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple):
UpperCamelCase__ : List[Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : Dict = model(UpperCAmelCase_)
UpperCamelCase__ : List[str] = (self.image_size // self.patch_size) ** 2
UpperCamelCase__ : Optional[int] = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
# test greyscale images
UpperCamelCase__ : List[Any] = 1
UpperCamelCase__ : Union[str, Any] = ViTMAEForPreTraining(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
UpperCamelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
UpperCamelCase__ : Union[str, Any] = model(UpperCAmelCase_)
UpperCamelCase__ : Tuple = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels))
def __UpperCamelCase ( self : Dict):
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ : List[str] = config_and_inputs
UpperCamelCase__ : Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
_lowerCamelCase = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
_lowerCamelCase = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__ : List[str] = ViTMAEModelTester(self)
UpperCamelCase__ : Any = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37)
def __UpperCamelCase ( self : Any):
self.config_tester.run_common_tests()
@unittest.skip(reason='ViTMAE does not use inputs_embeds')
def __UpperCamelCase ( self : Tuple):
pass
def __UpperCamelCase ( self : Optional[Any]):
UpperCamelCase__, UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
UpperCamelCase__ : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def __UpperCamelCase ( self : List[str]):
UpperCamelCase__, UpperCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Tuple = model_class(UpperCAmelCase_)
UpperCamelCase__ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : Any = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def __UpperCamelCase ( self : int):
UpperCamelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def __UpperCamelCase ( self : str):
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCAmelCase_)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict['noise'] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
def __UpperCamelCase ( self : int):
UpperCamelCase__, UpperCamelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : Tuple = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
                out_1 = outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(UpperCAmelCase_)
UpperCamelCase__ : str = model_class.from_pretrained(UpperCAmelCase_)
model.to(UpperCAmelCase_)
# make random mask reproducible
torch.manual_seed(2)
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
# Make sure we don't have nans
                out_2 = after_outputs[0].cpu().numpy()
                out_2[np.isnan(out_2)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Optional[int]):
pass
@unittest.skip(
        reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load')
def __UpperCamelCase ( self : Tuple):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __UpperCamelCase ( self : Optional[int]):
pass
@slow
def __UpperCamelCase ( self : Optional[Any]):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Tuple = ViTMAEModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class __lowercase (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self : int):
return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None
@slow
def __UpperCamelCase ( self : str):
# make random mask reproducible across the PT and TF model
np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
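For reference, the asserted logits shape is fully determined by the model configuration. A minimal sketch of the arithmetic (image_size=224, patch_size=16, num_channels=3 are assumptions taken from the published facebook/vit-mae-base config, not stated in this file):

num_patches = (224 // 16) ** 2   # 196 patches per 224x224 image
pixels_per_patch = 16 * 16 * 3   # 768 reconstructed values per patch
assert (1, num_patches, pixels_per_patch) == (1, 196, 768)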
| 6
| 0
|
"""simple docstring"""
from math import sqrt
def solution(limit: int = 1000000) -> int:
    """Return the least cuboid size M for which the number of cuboids no larger
    than M x M x M with an integer shortest surface path first exceeds ``limit``."""
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                # count pairs (a, b) with a <= b, a + b = sum_shortest_sides, b <= max_cuboid_size
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size


if __name__ == "__main__":
    print(f"{solution() = }")
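As a cross-check of the pair-counting identity above, here is a naive O(M^3) enumeration (added as a sketch; it reuses the record's `sqrt` import and, for small M, should agree with the grouped count and with the figures quoted in Project Euler 86: 1975 cuboids for M = 99, 2060 for M = 100):

def brute_force_count(max_size: int) -> int:
    # Count cuboids a <= b <= c <= max_size whose shortest surface path,
    # sqrt((a + b)**2 + c**2) with c the longest side, has integer length.
    count = 0
    for c in range(1, max_size + 1):
        for b in range(1, c + 1):
            for a in range(1, b + 1):
                if sqrt((a + b) ** 2 + c**2).is_integer():
                    count += 1
    return count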
| 420
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs are forwarded to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
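Assuming the file is saved as rouge_cli.py next to the utils module, fire maps the function signature onto the command line, and extra flags are forwarded to calculate_rouge via **kwargs:

# python rouge_cli.py predictions.txt references.txt --save_path metrics.json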
| 17
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowerCamelCase : Optional[int] = StableDiffusionInpaintPipeline
lowerCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
lowerCamelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCamelCase : List[str] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCamelCase : int = frozenset([] )
def UpperCAmelCase__ (self ):
torch.manual_seed(0 )
lowerCamelCase_ : Optional[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=__A , )
lowerCamelCase_ : int = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , )
lowerCamelCase_ : Tuple = CLIPTextModel(__A )
lowerCamelCase_ : Optional[int] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowerCamelCase_ : Dict = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase__ (self , A , A=0 ):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowerCamelCase_ : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A )
lowerCamelCase_ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ : int = Image.fromarray(np.uinta(__A ) ).convert('''RGB''' ).resize((6_4, 6_4) )
lowerCamelCase_ : Optional[int] = Image.fromarray(np.uinta(image + 4 ) ).convert('''RGB''' ).resize((6_4, 6_4) )
if str(__A ).startswith('''mps''' ):
lowerCamelCase_ : str = torch.manual_seed(__A )
else:
lowerCamelCase_ : Any = torch.Generator(device=__A ).manual_seed(__A )
lowerCamelCase_ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase__ (self ):
lowerCamelCase_ : str = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ : Tuple = self.get_dummy_components()
lowerCamelCase_ : str = StableDiffusionInpaintPipeline(**__A )
lowerCamelCase_ : int = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
lowerCamelCase_ : str = self.get_dummy_inputs(__A )
lowerCamelCase_ : int = sd_pipe(**__A ).images
lowerCamelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase_ : Dict = np.array([0.47_27, 0.57_35, 0.39_41, 0.54_46, 0.59_26, 0.43_94, 0.50_62, 0.46_54, 0.44_76] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase__ (self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
def UpperCAmelCase__ (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowerCamelCase_ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowerCamelCase_ : List[Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
lowerCamelCase_ : str = "stabilityai/stable-diffusion-2-inpainting"
lowerCamelCase_ : Union[str, Any] = StableDiffusionInpaintPipeline.from_pretrained(__A , safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowerCamelCase_ : Union[str, Any] = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : int = pipe(
prompt=__A , image=__A , mask_image=__A , generator=__A , output_type='''np''' , )
lowerCamelCase_ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowerCamelCase_ : List[str] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowerCamelCase_ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
lowerCamelCase_ : int = "stabilityai/stable-diffusion-2-inpainting"
lowerCamelCase_ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
__A , torch_dtype=torch.floataa , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
lowerCamelCase_ : List[str] = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCamelCase_ : Optional[Any] = torch.manual_seed(0 )
lowerCamelCase_ : Union[str, Any] = pipe(
prompt=__A , image=__A , mask_image=__A , generator=__A , output_type='''np''' , )
lowerCamelCase_ : Any = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCAmelCase__ (self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowerCamelCase_ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowerCamelCase_ : Union[str, Any] = "stabilityai/stable-diffusion-2-inpainting"
lowerCamelCase_ : List[str] = PNDMScheduler.from_pretrained(__A , subfolder='''scheduler''' )
lowerCamelCase_ : Tuple = StableDiffusionInpaintPipeline.from_pretrained(
__A , safety_checker=__A , scheduler=__A , torch_dtype=torch.floataa , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCamelCase_ : Union[str, Any] = "Face of a yellow cat, high resolution, sitting on a park bench"
lowerCamelCase_ : Tuple = torch.manual_seed(0 )
lowerCamelCase_ : List[Any] = pipe(
prompt=__A , image=__A , mask_image=__A , generator=__A , num_inference_steps=2 , output_type='''np''' , )
lowerCamelCase_ : Tuple = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 1_0**9
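The assertion above only holds because enable_sequential_cpu_offload keeps a single submodule on the GPU at a time. The measurement pattern, isolated as a sketch (the 2.65 GB threshold is copied from the test; the rest is generic PyTorch CUDA accounting):

torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
# ... run the offloaded pipeline ...
mem_bytes = torch.cuda.max_memory_allocated()
assert mem_bytes < 2.65 * 10**9  # peak VRAM stayed under 2.65 GB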
| 703
|
'''simple docstring'''
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num


if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
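A couple of doctest-style checks for solution (added here as a sketch; 2**15 = 32768 has digit sum 26, and 1366 is the published Project Euler 16 answer):

assert solution(15) == 26
assert solution(1000) == 1366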
| 357
| 0
|
"""simple docstring"""
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class _snake_case :
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
                 num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case_ ( self : Optional[Any] ):
return LlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def snake_case_ ( self : Any , snake_case : List[Any] , snake_case : str , snake_case : List[Any] , snake_case : List[Any] , snake_case : Dict , snake_case : int , snake_case : Tuple ):
UpperCAmelCase_ :List[str] = LlamaModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase_ :List[Any] = model(snake_case , attention_mask=snake_case )
UpperCAmelCase_ :int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self : int , snake_case : Any , snake_case : str , snake_case : Tuple , snake_case : Optional[int] , snake_case : Dict , snake_case : Dict , snake_case : Dict , snake_case : Optional[int] , snake_case : Optional[int] , ):
UpperCAmelCase_ :int = True
UpperCAmelCase_ :Tuple = LlamaModel(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase_ :Union[str, Any] = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
UpperCAmelCase_ :int = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , )
UpperCAmelCase_ :Tuple = model(snake_case , attention_mask=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case_ ( self : Dict , snake_case : Optional[Any] , snake_case : Tuple , snake_case : str , snake_case : List[Any] , snake_case : Any , snake_case : int , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Union[str, Any] , ):
UpperCAmelCase_ :Optional[int] = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase_ :Optional[int] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def snake_case_ ( self : Dict , snake_case : Union[str, Any] , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Any , snake_case : Optional[int] , snake_case : Any , snake_case : Any , snake_case : List[str] , snake_case : Dict , ):
UpperCAmelCase_ :Optional[Any] = True
UpperCAmelCase_ :Any = True
UpperCAmelCase_ :int = LlamaForCausalLM(config=snake_case )
model.to(snake_case )
model.eval()
# first forward pass
UpperCAmelCase_ :Optional[Any] = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , use_cache=snake_case , )
UpperCAmelCase_ :Optional[Any] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
UpperCAmelCase_ :Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ :Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
UpperCAmelCase_ :Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ :Tuple = torch.cat([input_mask, next_mask] , dim=-1 )
UpperCAmelCase_ :str = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , output_hidden_states=snake_case , )['''hidden_states'''][0]
UpperCAmelCase_ :Union[str, Any] = model(
snake_case , attention_mask=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , past_key_values=snake_case , output_hidden_states=snake_case , )['''hidden_states'''][0]
# select random slice
UpperCAmelCase_ :Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ :List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ :Dict = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( A__ , A__ , A__ , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase__ =(LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
UpperCamelCase__ =(LlamaForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ =(
{
"""feature-extraction""": LlamaModel,
"""text-classification""": LlamaForSequenceClassification,
"""text-generation""": LlamaForCausalLM,
"""zero-shot""": LlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ =False
UpperCamelCase__ =False
def snake_case_ ( self : int ):
UpperCAmelCase_ :Dict = LlamaModelTester(self )
UpperCAmelCase_ :Optional[int] = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def snake_case_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase_ :Any = type
self.model_tester.create_and_check_model(*snake_case )
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ ,UpperCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ :Optional[int] = 3
UpperCAmelCase_ :str = input_dict['''input_ids''']
UpperCAmelCase_ :Tuple = input_ids.ne(1 ).to(snake_case )
UpperCAmelCase_ :Tuple = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ :Optional[Any] = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase_ :Optional[Any] = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ ,UpperCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ :Tuple = 3
UpperCAmelCase_ :Union[str, Any] = '''single_label_classification'''
UpperCAmelCase_ :Dict = input_dict['''input_ids''']
UpperCAmelCase_ :List[str] = input_ids.ne(1 ).to(snake_case )
UpperCAmelCase_ :Union[str, Any] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
UpperCAmelCase_ :int = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase_ :Tuple = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def snake_case_ ( self : int ):
UpperCAmelCase_ ,UpperCAmelCase_ :Dict = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ :Any = 3
UpperCAmelCase_ :Tuple = '''multi_label_classification'''
UpperCAmelCase_ :Optional[Any] = input_dict['''input_ids''']
UpperCAmelCase_ :Dict = input_ids.ne(1 ).to(snake_case )
UpperCAmelCase_ :Union[str, Any] = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
UpperCAmelCase_ :int = LlamaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase_ :Dict = model(snake_case , attention_mask=snake_case , labels=snake_case )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip('''LLaMA buffers include complex numbers, which breaks this test''' )
def snake_case_ ( self : str ):
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
def snake_case_ ( self : Tuple , snake_case : List[str] ):
UpperCAmelCase_ ,UpperCAmelCase_ :Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ :Tuple = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase_ :Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ :Dict = LlamaModel(snake_case )
original_model.to(snake_case )
original_model.eval()
UpperCAmelCase_ :str = original_model(snake_case ).last_hidden_state
UpperCAmelCase_ :List[str] = original_model(snake_case ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ :str = {'''type''': scaling_type, '''factor''': 10.0}
UpperCAmelCase_ :Optional[int] = LlamaModel(snake_case )
scaled_model.to(snake_case )
scaled_model.eval()
UpperCAmelCase_ :Tuple = scaled_model(snake_case ).last_hidden_state
UpperCAmelCase_ :int = scaled_model(snake_case ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(snake_case , snake_case , atol=1e-5 ) )
@require_torch
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :Any = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCAmelCase_ :List[str] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-7b-hf''' , device_map='''auto''' )
UpperCAmelCase_ :List[str] = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
UpperCAmelCase_ :str = torch.tensor([[-6.6_550, -4.1_227, -4.9_859, -3.2_406, 0.8_262, -3.0_033, 1.2_964, -3.3_699]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ :Optional[int] = torch.tensor([-12.8_281, -7.4_453, -0.4_639, -8.0_625, -7.2_500, -8.0_000, -6.4_883, -7.7_695, -7.8_438, -7.0_312, -6.2_188, -7.1_328, -1.8_496, 1.9_961, -8.6_250, -6.7_227, -12.8_281, -6.9_492, -7.0_742, -7.7_852, -7.5_820, -7.9_062, -6.9_375, -7.9_805, -8.3_438, -8.1_562, -8.0_469, -7.6_250, -7.7_422, -7.3_398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def snake_case_ ( self : str ):
UpperCAmelCase_ :str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCAmelCase_ :Any = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-hf''' , device_map='''auto''' )
UpperCAmelCase_ :int = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
UpperCAmelCase_ :Optional[Any] = torch.tensor([[-2.0_622, -1.2_794, -1.1_638, -0.9_788, -1.4_603, -1.0_238, -1.7_893, -1.4_411]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ :str = torch.tensor([-8.1_406, -8.0_547, 2.7_461, -1.2_344, -0.1_448, -1.8_262, -1.0_020, -1.8_154, -1.6_895, -1.8_516, -2.3_574, -0.9_277, 3.7_598, 6.5_742, -1.2_998, -0.1_177, -8.1_406, -2.9_688, -2.9_199, -3.1_699, -3.5_254, -2.3_555, -2.7_988, -3.4_141, -2.8_262, -4.5_195, -3.3_379, -3.3_164, -2.7_832, -3.0_273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Logits are not exactly the same, once we fix the instabilities somehow, will update!''' )
@slow
def snake_case_ ( self : Tuple ):
UpperCAmelCase_ :str = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCAmelCase_ :Optional[Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' , device_map='''auto''' )
UpperCAmelCase_ :Tuple = model(torch.tensor(snake_case ) )
# Expected mean on dim = -1
UpperCAmelCase_ :Optional[Any] = torch.tensor([[-0.8_562, -1.8_520, -0.7_551, -0.4_162, -1.5_161, -1.2_038, -2.4_823, -2.3_254]] )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1e-2 , rtol=1e-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
UpperCAmelCase_ :Any = torch.tensor([-2.2_227, 4.8_828, 0.9_023, -0.4_578, -0.7_871, -0.1_033, -0.6_221, -0.5_786, -0.7_803, -1.0_674, -1.2_920, -0.1_570, 0.8_008, 2.0_723, -0.9_497, 0.2_771, -2.2_227, -0.7_612, -1.4_346, -1.2_061, -1.6_426, -0.3_000, -0.7_139, -1.1_934, -1.8_691, -1.6_973, -1.5_947, -1.2_705, -0.3_523, -0.5_513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1e-2 , rtol=1e-2 )
@unittest.skip(
        '''Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is going to be a `too_slow` test''' )
@slow
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :Tuple = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
UpperCAmelCase_ :Union[str, Any] = LlamaForCausalLM.from_pretrained('''meta-llama/Llama-2-70b-hf''' , device_map='''auto''' )
UpperCAmelCase_ :Union[str, Any] = model(torch.tensor(snake_case ) )
UpperCAmelCase_ :str = torch.tensor(
[[-4.2_327, -3.3_360, -4.6_665, -4.7_631, -1.8_180, -3.4_170, -1.4_211, -3.1_810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , snake_case , atol=1e-2 , rtol=1e-2 )
# fmt: off
UpperCAmelCase_ :int = torch.tensor([-9.4_922, -3.9_551, 1.7_998, -5.6_758, -5.1_055, -5.8_984, -4.8_320, -6.8_086, -6.5_391, -5.6_172, -5.5_820, -5.5_352, 1.7_881, 3.6_289, -6.5_117, -3.4_785, -9.5_000, -6.0_352, -6.8_125, -6.0_195, -6.6_836, -5.4_727, -6.2_812, -6.0_391, -7.3_398, -7.4_297, -7.4_844, -6.5_820, -5.8_789, -5.5_312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , snake_case , atol=1e-5 , rtol=1e-5 )
    @unittest.skip('''Model is currently gated''' )
@slow
def snake_case_ ( self : List[Any] ):
UpperCAmelCase_ :Optional[int] = '''Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'''
UpperCAmelCase_ :Tuple = '''Simply put, the theory of relativity states that '''
UpperCAmelCase_ :Dict = LlamaTokenizer.from_pretrained('''meta-llama/Llama-2-13b-chat-hf''' )
UpperCAmelCase_ :Optional[Any] = tokenizer.encode(snake_case , return_tensors='''pt''' )
UpperCAmelCase_ :Optional[int] = LlamaForCausalLM.from_pretrained(
'''meta-llama/Llama-2-13b-chat-hf''' , device_map='''sequential''' , use_safetensors=snake_case )
# greedy generation outputs
UpperCAmelCase_ :Tuple = model.generate(snake_case , max_new_tokens=64 , top_p=snake_case , temperature=1 , do_sample=snake_case )
UpperCAmelCase_ :List[str] = tokenizer.decode(generated_ids[0] , skip_special_tokens=snake_case )
self.assertEqual(snake_case , snake_case )
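For context, the RoPE scaling test above attaches a rope_scaling dict of the form {'type': ..., 'factor': 10.0} to the config before building the model. A minimal sketch of the same configuration in isolation (the tiny dimensions are illustrative assumptions so the model is cheap to build):

config = LlamaConfig(
    vocab_size=100, hidden_size=32, num_hidden_layers=2, num_attention_heads=4,
    intermediate_size=64, rope_scaling={'type': 'dynamic', 'factor': 10.0},
)
model = LlamaModel(config)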
| 608
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' )
        processor = BlipProcessor(image_processor, tokenizer)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :Tuple = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ :Tuple = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCAmelCase_ :Union[str, Any] = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
UpperCAmelCase_ :Tuple = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def snake_case_ ( self : int ):
UpperCAmelCase_ :Tuple = self.get_image_processor()
UpperCAmelCase_ :Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ :int = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :Optional[int] = self.prepare_image_inputs()
UpperCAmelCase_ :Union[str, Any] = image_processor(snake_case , return_tensors='''np''' )
UpperCAmelCase_ :Any = processor(images=snake_case , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : str ):
UpperCAmelCase_ :List[str] = self.get_image_processor()
UpperCAmelCase_ :Tuple = self.get_tokenizer()
UpperCAmelCase_ :List[Any] = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :Dict = '''lower newer'''
UpperCAmelCase_ :str = processor(text=snake_case )
UpperCAmelCase_ :Any = tokenizer(snake_case , return_token_type_ids=snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : Any ):
UpperCAmelCase_ :Dict = self.get_image_processor()
UpperCAmelCase_ :Tuple = self.get_tokenizer()
UpperCAmelCase_ :Dict = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :Union[str, Any] = '''lower newer'''
UpperCAmelCase_ :Optional[int] = self.prepare_image_inputs()
UpperCAmelCase_ :Any = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
# test if it raises when no input is passed
with pytest.raises(snake_case ):
processor()
def snake_case_ ( self : Optional[int] ):
UpperCAmelCase_ :Union[str, Any] = self.get_image_processor()
UpperCAmelCase_ :Optional[int] = self.get_tokenizer()
UpperCAmelCase_ :int = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ :Optional[Any] = processor.batch_decode(snake_case )
UpperCAmelCase_ :Dict = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def snake_case_ ( self : str ):
UpperCAmelCase_ :str = self.get_image_processor()
UpperCAmelCase_ :Optional[Any] = self.get_tokenizer()
UpperCAmelCase_ :Dict = BlipProcessor(tokenizer=snake_case , image_processor=snake_case )
UpperCAmelCase_ :List[Any] = '''lower newer'''
UpperCAmelCase_ :List[str] = self.prepare_image_inputs()
UpperCAmelCase_ :List[str] = processor(text=snake_case , images=snake_case )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 608
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
snake_case__ : Union[str, Any] = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__lowercase ,cache_dir=__lowercase )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], '''snapshots'''))]
            files = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class a ( unittest.TestCase ):
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Dict = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' ,safety_checker=__lowercase )
snake_case__ : int = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Tuple = jax.random.PRNGKey(0 )
snake_case__ : int = 4
snake_case__ : Optional[int] = jax.device_count()
snake_case__ : List[Any] = num_samples * [prompt]
snake_case__ : Tuple = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : str = replicate(__lowercase )
snake_case__ : List[Any] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : List[str] = shard(__lowercase )
snake_case__ : str = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 6_4, 6_4, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 4.151_4745 ) < 1e-3
assert np.abs(np.abs(__lowercase ,dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
snake_case__ : Optional[Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__lowercase ) == num_samples
def __lowerCamelCase ( self :Tuple ):
snake_case__ : List[Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''flax''' ,safety_checker=__lowercase )
snake_case__ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Union[str, Any] = jax.random.PRNGKey(0 )
snake_case__ : List[Any] = 5_0
snake_case__ : Union[str, Any] = jax.device_count()
snake_case__ : Optional[int] = num_samples * [prompt]
snake_case__ : Any = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : str = replicate(__lowercase )
snake_case__ : Optional[int] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Dict = shard(__lowercase )
snake_case__ : str = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0565_2401) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __lowerCamelCase ( self :Union[str, Any] ):
snake_case__ : Tuple = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,safety_checker=__lowercase )
snake_case__ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Dict = jax.random.PRNGKey(0 )
snake_case__ : Any = 5_0
snake_case__ : Tuple = jax.device_count()
snake_case__ : str = num_samples * [prompt]
snake_case__ : int = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Dict = replicate(__lowercase )
snake_case__ : Union[str, Any] = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Tuple = shard(__lowercase )
snake_case__ : Optional[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __lowerCamelCase ( self :str ):
snake_case__ : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa )
snake_case__ : Tuple = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : int = jax.random.PRNGKey(0 )
snake_case__ : Optional[int] = 5_0
snake_case__ : List[str] = jax.device_count()
snake_case__ : int = num_samples * [prompt]
snake_case__ : List[str] = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Optional[int] = replicate(__lowercase )
snake_case__ : str = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Dict = shard(__lowercase )
snake_case__ : Union[str, Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0400_3906) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Union[str, Any] = FlaxDDIMScheduler(
beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule='''scaled_linear''' ,set_alpha_to_one=__lowercase ,steps_offset=1 ,)
snake_case__ : Any = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' ,revision='''bf16''' ,dtype=jnp.bfloataa ,scheduler=__lowercase ,safety_checker=__lowercase ,)
snake_case__ : Dict = scheduler.create_state()
snake_case__ : List[str] = scheduler_state
snake_case__ : Any = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
snake_case__ : Tuple = jax.random.PRNGKey(0 )
snake_case__ : int = 5_0
snake_case__ : int = jax.device_count()
snake_case__ : List[Any] = num_samples * [prompt]
snake_case__ : Tuple = pipeline.prepare_inputs(__lowercase )
# shard inputs and rng
snake_case__ : Tuple = replicate(__lowercase )
snake_case__ : Dict = jax.random.split(__lowercase ,__lowercase )
snake_case__ : Optional[Any] = shard(__lowercase )
snake_case__ : List[Any] = pipeline(__lowercase ,__lowercase ,__lowercase ,__lowercase ,jit=__lowercase ).images
assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] ,dtype=np.floataa ).sum() - 0.0_4504_3945) ) < 1e-3
assert np.abs((np.abs(__lowercase ,dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloat16, safety_checker=None,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloat16, safety_checker=None,
            use_memory_efficient_attention=True,
        )
        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
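The replicate/shard calls used throughout these tests follow the standard jax.pmap data-parallel recipe: parameters are copied to every device, inputs are split along the leading axis. A standalone sketch (array shapes are illustrative assumptions):

import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard

n = jax.device_count()
batch = np.zeros((n * 2, 77), dtype=np.int32)   # leading dim must be divisible by n
sharded = shard(batch)                          # reshaped to (n, 2, 77): one slice per device
replicated = replicate({'w': np.ones((4, 4))})  # same pytree copied onto every device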
| 708
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """simple docstring"""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement, hint=None) -> None:
    """simple docstring"""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure"""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
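Example calls, mirroring how these helpers are typically used (the version constraints are illustrative):

require_version("numpy")                               # presence check only
require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")   # multiple constraints on one package
require_version_core("datasets>=1.8.0")                # same check plus the core install hint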
| 219
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class TimmBackboneConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''timm_backbone'''

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
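A minimal usage sketch (resnet18 is an illustrative timm model id, not something this file pins down):

config = TimmBackboneConfig(backbone='''resnet18''', out_indices=(1, 2, 3, 4))
assert config.features_only and config.use_pretrained_backbone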
| 34
|
'''simple docstring'''
def decimal_to_binary(num: int) -> str:
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError("""'float' object cannot be interpreted as an integer""" )
    if isinstance(num, str):
        raise TypeError("""'str' object cannot be interpreted as an integer""" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
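A few expected values for decimal_to_binary (added as a sanity sketch; not present in the original):

assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(40) == "0b101000"
assert decimal_to_binary(-40) == "-0b101000"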
| 334
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase = DiTPipeline
UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
UpperCAmelCase = False
def lowercase_ ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase__ : List[str] = TransformeraDModel(
sample_size=16 ,num_layers=2 ,patch_size=4 ,attention_head_dim=8 ,num_attention_heads=2 ,in_channels=4 ,out_channels=8 ,attention_bias=__UpperCAmelCase ,activation_fn='''gelu-approximate''' ,num_embeds_ada_norm=10_00 ,norm_type='''ada_norm_zero''' ,norm_elementwise_affine=__UpperCAmelCase ,)
lowerCamelCase__ : Optional[Any] = AutoencoderKL()
lowerCamelCase__ : Optional[int] = DDIMScheduler()
lowerCamelCase__ : Optional[int] = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def lowercase_ ( self :int ,__UpperCAmelCase :Any ,__UpperCAmelCase :List[str]=0 ) -> Optional[int]:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith('''mps''' ):
lowerCamelCase__ : Optional[int] = torch.manual_seed(__UpperCAmelCase )
else:
lowerCamelCase__ : Union[str, Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
lowerCamelCase__ : Optional[int] = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def lowercase_ ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__ : List[str] = '''cpu'''
lowerCamelCase__ : int = self.get_dummy_components()
lowerCamelCase__ : str = self.pipeline_class(**__UpperCAmelCase )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
lowerCamelCase__ : int = self.get_dummy_inputs(__UpperCAmelCase )
lowerCamelCase__ : List[str] = pipe(**__UpperCAmelCase ).images
lowerCamelCase__ : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 16, 16, 3) )
lowerCamelCase__ : str = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] )
lowerCamelCase__ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__UpperCAmelCase ,1E-3 )
def lowercase_ ( self :str ) -> Any:
"""simple docstring"""
self._test_inference_batch_single_identical(relax_max_difference=__UpperCAmelCase ,expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
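

# Usage sketch for the pipeline exercised above (hedged: downloads
# facebook/DiT-XL-2-256 from the Hub and realistically needs a CUDA GPU):
#
#     from diffusers import DiTPipeline
#     pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256").to("cuda")
#     class_ids = pipe.get_label_ids(["white shark"])
#     image = pipe(class_ids, num_inference_steps=25).images[0]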
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=prompt_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=2, output_type="numpy",
        ).images
        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator,
            guidance_scale=7.5, num_inference_steps=50, output_type="numpy",
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
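

# Usage sketch for the dual-guided mode tested above (hedged: downloads
# shi-labs/versatile-diffusion and needs a GPU; `some_pil_image` is a placeholder):
#
#     pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion")
#     out = pipe.dual_guided(prompt="a red car", image=some_pil_image,
#                            text_to_image_strength=0.75, num_inference_steps=50)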
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    tesseract_config = tesseract_config if tesseract_config is not None else ""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]
    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)
    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
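

# A tiny self-contained check of the 0-1000 normalization above (a sketch):
def _normalize_box_demo():
    box = [50, 100, 150, 200]  # pixel coordinates (x0, y0, x1, y1)
    width, height = 500, 1000  # page size in pixels
    assert normalize_box(box, width, height) == [100, 100, 300, 200]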
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None,
                 resample: PILImageResampling = PILImageResampling.BILINEAR, apply_ocr: bool = True,
                 ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(self, image: np.ndarray, size: Dict[str, int],
               resample: PILImageResampling = PILImageResampling.BILINEAR,
               data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None,
                   resample: PILImageResampling = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None,
                   tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None,
                   data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
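

# Usage sketch for the image processor above (hedged: requires Pillow,
# pytesseract and a local Tesseract install; "scan.png" is a placeholder path):
#
#     from PIL import Image
#     processor = LayoutLMv2ImageProcessor()
#     encoding = processor(Image.open("scan.png").convert("RGB"), return_tensors="np")
#     # encoding["pixel_values"]: resized, BGR-flipped images
#     # encoding["words"], encoding["boxes"]: OCR words and 0-1000 normalized boxes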
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""gpt2""": 1024,
"""gpt2-medium""": 1024,
"""gpt2-large""": 1024,
"""gpt2-xl""": 1024,
"""distilgpt2""": 1024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>",
                 bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
                         bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs)
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
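

# Usage sketch (hedged: "gpt2" is fetched from the Hugging Face Hub on first use):
#
#     tok = GPT2TokenizerFast.from_pretrained("gpt2")
#     enc = tok("Hello world")
#     enc["input_ids"], enc["attention_mask"]  # BPE token ids and their mask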
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
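

# Worked examples for the three shifts above (a sketch; the values follow
# directly from the definitions, e.g. the arithmetic shift replicates the sign bit):
assert logical_left_shift(17, 2) == "0b1000100"      # 17 << 2 == 68
assert logical_right_shift(1983, 4) == "0b1111011"   # 1983 >> 4 == 123
assert arithmetic_right_shift(-17, 2) == "0b111011"  # -17 >> 2 == -5 in 6-bit two's complement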
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
        image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()
            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(image_processor.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            image_processor = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)
            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))
                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)
            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
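

# Registration sketch mirroring the tests above (hedged: CustomConfig and
# CustomImageProcessor are the local test fixtures imported at the top, and
# `path_with_custom_config` is a placeholder):
#
#     AutoConfig.register("custom", CustomConfig)
#     AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#     processor = AutoImageProcessor.from_pretrained(path_with_custom_config)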
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample], label_list: List[str], max_seq_length: int, tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False, cls_token="[CLS]", cls_token_segment_id=1, sep_token="[SEP]", sep_token_extra=False,
        pad_on_left=False, pad_token=0, pad_token_segment_id=0, pad_token_label_id=-100, sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
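

# A tiny worked example of the label-alignment rule implemented above,
# independent of any real tokenizer (the 3-way subword split is hypothetical):
def _label_alignment_demo():
    pad_token_label_id = -100
    label_map = {"O": 0, "B-LOC": 1}
    word_tokens = ["jack", "##son", "##ville"]  # subwords of a single word
    label_ids = [label_map["B-LOC"]] + [pad_token_label_id] * (len(word_tokens) - 1)
    assert label_ids == [1, -100, -100]  # the loss is only computed on the first piece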
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str,
                     tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str,
                     max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token, sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100  # positions with this label id are ignored by the loss

        def __init__(self, token_classification_task: TokenClassificationTask, data_dir: str,
                     tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str,
                     max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token, sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
__magic_name__: Union[str, Any] = tf.data.Dataset.from_generator(
__snake_case , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa}, tf.intaa) , (
{"""input_ids""": tf.TensorShape([None] ), """attention_mask""": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
__magic_name__: str = tf.data.Dataset.from_generator(
__snake_case , ({"""input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa}, tf.intaa) , (
{
"""input_ids""": tf.TensorShape([None] ),
"""attention_mask""": tf.TensorShape([None] ),
"""token_type_ids""": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A = spec.loader.load_module()
__A = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
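

# A quick check of the checkpoint-link regex above (a sketch):
assert _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)") == [
    ("bert-base-uncased", "https://huggingface.co/bert-base-uncased")
]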
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
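

# A tiny round-trip sketch of the batch iteration used in `_generate_tables`
# above ("demo.parquet" is a placeholder path; pa/pq are imported at the top):
def _parquet_batches_demo(path="demo.parquet"):
    pq.write_table(pa.table({"x": [1, 2, 3]}), path)
    for batch in pq.ParquetFile(path).iter_batches(batch_size=2):
        print(pa.Table.from_batches([batch]).to_pydict())  # {'x': [1, 2]} then {'x': [3]}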
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
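

# Usage sketch for the processor under test (hedged: checkpoints are fetched
# from the Hub; `pil_image` is a placeholder):
#
#     processor = VisionTextDualEncoderProcessor(
#         tokenizer=BertTokenizerFast.from_pretrained("bert-base-uncased"),
#         image_processor=ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k"),
#     )
#     inputs = processor(text=["a photo of a cat"], images=[pil_image], return_tensors="np")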
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def load_and_quantize_model(
    model, bnb_quantization_config, weights_location=None, device_map=None,
    no_split_module_classes=None, max_memory=None, offload_folder=None, offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            "You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
            " make sure you have the latest version of `bitsandbytes` installed."
        )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
            "make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization."
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map,
            max_memory=max_memory, no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model, weights_location, device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
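

# Usage sketch (hedged: follows the accelerate quantization guide; needs a CUDA
# GPU plus `bitsandbytes`, and the checkpoint location is a placeholder):
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#     with init_empty_weights():
#         empty_model = MyModel()  # any nn.Module built without allocating weights
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#     model = load_and_quantize_model(empty_model, bnb_config,
#                                     weights_location="path/to/weights", device_map="auto")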
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == "balanced_low_0"), max_memory=max_memory, **kwargs
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# check if don't have any quantized module on the cpu
lowercase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowercase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
logger.info(
                    'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None ):
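    # Thin wrapper around the recursive replacement helper below; warns when no
    # nn.Linear layers were found to convert (e.g. Conv1D-based models such as gpt2).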
if modules_to_not_convert is None:
lowercase = []
lowercase , lowercase = _replace_with_bnb_layers(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if not has_been_replaced:
logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' This can happen for some architectures such as gpt2 that use Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.' )
return model
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ):
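    # Recursively walk the module tree, swapping each eligible nn.Linear for the matching
    # bitsandbytes layer; the dotted path tracked in the current key name lets entries
    # listed in modules_to_not_convert be skipped.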
lowercase = False
for name, module in model.named_children():
if current_key_name is None:
lowercase = []
current_key_name.append(__SCREAMING_SNAKE_CASE )
if isinstance(__SCREAMING_SNAKE_CASE , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowercase = '.'.join(__SCREAMING_SNAKE_CASE )
lowercase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowercase = False
break
if proceed:
                # Load a bnb module with empty weights and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    lowercase = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    lowercase = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
lowercase = module.weight.data
if module.bias is not None:
lowercase = module.bias.data
bnb_module.requires_grad_(__SCREAMING_SNAKE_CASE )
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase = True
if len(list(module.children() ) ) > 0:
lowercase , lowercase = _replace_with_bnb_layers(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
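    # Work out which module names must stay un-quantized: tied parameters plus the output
    # head attached on top of the base model, with ".weight"/".bias" suffixes stripped.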
# Create a copy of the model
with init_empty_weights():
        lowercase = deepcopy(__SCREAMING_SNAKE_CASE ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
lowercase = find_tied_parameters(__SCREAMING_SNAKE_CASE )
# For compatibility with Accelerate < 0.18
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
lowercase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowercase = sum(__SCREAMING_SNAKE_CASE , [] )
lowercase = len(__SCREAMING_SNAKE_CASE ) > 0
# Check if it is a base model
lowercase = False
if hasattr(__SCREAMING_SNAKE_CASE , 'base_model_prefix' ):
lowercase = not hasattr(__SCREAMING_SNAKE_CASE , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowercase = list(model.named_children() )
lowercase = [list_modules[-1][0]]
# add last module together with tied weights
lowercase = set(__SCREAMING_SNAKE_CASE ) - set(__SCREAMING_SNAKE_CASE )
lowercase = list(set(__SCREAMING_SNAKE_CASE ) ) + list(__SCREAMING_SNAKE_CASE )
# remove ".weight" from the keys
lowercase = ['.weight', '.bias']
lowercase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowercase = name.replace(__SCREAMING_SNAKE_CASE , '' )
filtered_module_names.append(__SCREAMING_SNAKE_CASE )
return filtered_module_names
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
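    # Report whether any bitsandbytes quantized linear layer is present in the model.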
for m in model.modules():
if isinstance(__SCREAMING_SNAKE_CASE , bnb.nn.Linearabit ):
return True
return False
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
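    # Device of the first parameter; assumes the whole module sits on a single device.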
return next(parameter.parameters() ).device
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 0 , dtype=__SCREAMING_SNAKE_CASE , value=__SCREAMING_SNAKE_CASE )
lowercase = param_name
lowercase = model
if "." in tensor_name:
lowercase = tensor_name.split('.' )
for split in splits[:-1]:
lowercase = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
lowercase = new_module
lowercase = splits[-1]
# offload weights
lowercase = False
offload_weight(module._parameters[tensor_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , __SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE , )
else:
offload_weight(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE )
offload_weight(__SCREAMING_SNAKE_CASE , param_name.replace('weight' , 'SCB' ) , __SCREAMING_SNAKE_CASE , index=__SCREAMING_SNAKE_CASE )
set_module_tensor_to_device(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 'meta' , dtype=__SCREAMING_SNAKE_CASE , value=torch.empty(*param.size() ) )
| 84
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase__ ):
__lowerCAmelCase : List[str] = ['speech']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
requires_backends(self , ["""speech"""] )
class SCREAMING_SNAKE_CASE__ ( metaclass=UpperCAmelCase__ ):
__lowerCAmelCase : int = ['speech']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
requires_backends(self , ["""speech"""] )
| 160
| 0
|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
lowercase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowercase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def __a ( self : str , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase = AudioClassificationPipeline(model=__lowerCamelCase , feature_extractor=__lowerCamelCase )
# test with a raw waveform
lowercase = np.zeros((3_40_00,) )
lowercase = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def __a ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> Dict:
'''simple docstring'''
lowercase ,lowercase = examples
lowercase = audio_classifier(__lowerCamelCase )
# by default a model is initialized with num_labels=2
self.assertEqual(
__lowerCamelCase , [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
] , )
lowercase = audio_classifier(__lowerCamelCase , top_k=1 )
self.assertEqual(
__lowerCamelCase , [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
] , )
self.run_torchaudio(__lowerCamelCase )
@require_torchaudio
def __a ( self : Dict , __lowerCamelCase : Tuple ) -> Tuple:
'''simple docstring'''
import datasets
# test with a local file
lowercase = datasets.load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
lowercase = dataset[0]['''audio''']['''array''']
lowercase = audio_classifier(__lowerCamelCase )
self.assertEqual(
__lowerCamelCase , [
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
{'''score''': ANY(__lowerCamelCase ), '''label''': ANY(__lowerCamelCase )},
] , )
@require_torch
def __a ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase = '''anton-l/wav2vec2-random-tiny-classifier'''
lowercase = pipeline('''audio-classification''' , model=__lowerCamelCase )
lowercase = np.ones((80_00,) )
lowercase = audio_classifier(__lowerCamelCase , top_k=4 )
lowercase = [
{'''score''': 0.0842, '''label''': '''no'''},
{'''score''': 0.0838, '''label''': '''up'''},
{'''score''': 0.0837, '''label''': '''go'''},
{'''score''': 0.0834, '''label''': '''right'''},
]
lowercase = [
{'''score''': 0.0845, '''label''': '''stop'''},
{'''score''': 0.0844, '''label''': '''on'''},
{'''score''': 0.0841, '''label''': '''right'''},
{'''score''': 0.0834, '''label''': '''left'''},
]
self.assertIn(nested_simplify(__lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
lowercase = {'''array''': np.ones((80_00,) ), '''sampling_rate''': audio_classifier.feature_extractor.sampling_rate}
lowercase = audio_classifier(__lowerCamelCase , top_k=4 )
self.assertIn(nested_simplify(__lowerCamelCase , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def __a ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
import datasets
lowercase = '''superb/wav2vec2-base-superb-ks'''
lowercase = pipeline('''audio-classification''' , model=__lowerCamelCase )
lowercase = datasets.load_dataset('''anton-l/superb_dummy''' , '''ks''' , split='''test''' )
lowercase = np.array(dataset[3]['''speech'''] , dtype=np.floataa )
lowercase = audio_classifier(__lowerCamelCase , top_k=4 )
self.assertEqual(
nested_simplify(__lowerCamelCase , decimals=3 ) , [
{'''score''': 0.981, '''label''': '''go'''},
{'''score''': 0.007, '''label''': '''up'''},
{'''score''': 0.006, '''label''': '''_unknown_'''},
{'''score''': 0.001, '''label''': '''down'''},
] , )
@require_tf
@unittest.skip('''Audio classification is not implemented for TF''' )
def __a ( self : List[str] ) -> List[Any]:
'''simple docstring'''
pass
| 479
|
import cmath
import math
def __UpperCAmelCase ( UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )-> complex:
"""simple docstring"""
lowercase = math.radians(UpperCAmelCase )
lowercase = math.radians(UpperCAmelCase )
# Convert voltage and current to rectangular form
lowercase = cmath.rect(UpperCAmelCase, UpperCAmelCase )
lowercase = cmath.rect(UpperCAmelCase, UpperCAmelCase )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
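    # Illustrative usage with hypothetical values (not part of the original module):
    # a 100 V source at 30 degrees driving a 5 A load at -15 degrees would give
    #     v = cmath.rect(100, math.radians(30))
    #     i = cmath.rect(5, math.radians(-15))
    #     s = v * i  # complex apparent power, |s| = 500 VA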
| 479
| 1
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __UpperCAmelCase ( a_: bytes, a_: int ):
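    # Decode an in-memory audio file with ffmpeg: the raw bytes are piped through stdin
    # and mono float32 PCM at the requested sampling rate is read back from stdout.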
_UpperCAmelCase : str = f"""{sampling_rate}"""
_UpperCAmelCase : Dict = "1"
_UpperCAmelCase : Union[str, Any] = "f32le"
_UpperCAmelCase : Optional[Any] = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(a_, stdin=subprocess.PIPE, stdout=subprocess.PIPE ) as ffmpeg_process:
_UpperCAmelCase : Optional[int] = ffmpeg_process.communicate(a_ )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
_UpperCAmelCase : Optional[Any] = output_stream[0]
_UpperCAmelCase : Optional[Any] = np.frombuffer(a_, np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def __UpperCAmelCase ( a_: int, a_: float, a_: str = "f32le", ):
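    # Capture raw microphone audio by spawning ffmpeg with the platform's native capture
    # backend (alsa on Linux, avfoundation on macOS, dshow on Windows) and yield it in
    # chunks of roughly chunk_length_s seconds.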
_UpperCAmelCase : str = f"""{sampling_rate}"""
_UpperCAmelCase : Dict = "1"
if format_for_conversion == "s16le":
_UpperCAmelCase : Tuple = 2
elif format_for_conversion == "f32le":
_UpperCAmelCase : Any = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_UpperCAmelCase : Union[str, Any] = platform.system()
if system == "Linux":
_UpperCAmelCase : List[Any] = "alsa"
_UpperCAmelCase : List[str] = "default"
elif system == "Darwin":
_UpperCAmelCase : List[str] = "avfoundation"
_UpperCAmelCase : List[str] = ":0"
elif system == "Windows":
_UpperCAmelCase : Optional[int] = "dshow"
_UpperCAmelCase : Optional[Any] = "default"
_UpperCAmelCase : Optional[int] = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
_UpperCAmelCase : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_UpperCAmelCase : List[Any] = _ffmpeg_stream(a_, a_ )
for item in iterator:
yield item
def __UpperCAmelCase ( a_: int, a_: float, a_: Optional[int] = None, a_: Optional[Union[Tuple[float, float], float]] = None, a_: str = "f32le", ):
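    # Wrap the raw microphone stream into overlapping numpy chunks with left/right strides
    # (defaulting to chunk_length_s / 6) and skip chunks that arrive too late, so the
    # stream stays close to real time.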
if stream_chunk_s is not None:
_UpperCAmelCase : int = stream_chunk_s
else:
_UpperCAmelCase : Union[str, Any] = chunk_length_s
_UpperCAmelCase : Union[str, Any] = ffmpeg_microphone(a_, a_, format_for_conversion=a_ )
if format_for_conversion == "s16le":
_UpperCAmelCase : Optional[int] = np.intaa
_UpperCAmelCase : List[str] = 2
elif format_for_conversion == "f32le":
_UpperCAmelCase : Any = np.floataa
_UpperCAmelCase : str = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_UpperCAmelCase : Optional[int] = chunk_length_s / 6
_UpperCAmelCase : int = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(a_, (int, float) ):
_UpperCAmelCase : List[Any] = [stride_length_s, stride_length_s]
_UpperCAmelCase : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_UpperCAmelCase : Any = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_UpperCAmelCase : Tuple = datetime.datetime.now()
_UpperCAmelCase : Optional[Any] = datetime.timedelta(seconds=a_ )
for item in chunk_bytes_iter(a_, a_, stride=(stride_left, stride_right), stream=a_ ):
# Put everything back in numpy scale
_UpperCAmelCase : List[str] = np.frombuffer(item["raw"], dtype=a_ )
_UpperCAmelCase : Tuple = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
_UpperCAmelCase : Any = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __UpperCAmelCase ( a_: int, a_: int, a_: Tuple[int, int], a_: bool = False ):
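    # Re-chunk a byte iterator into fixed windows of chunk_len bytes that overlap by
    # (stride_left, stride_right); with stream=True, incomplete windows are yielded
    # early and flagged with "partial": True.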
_UpperCAmelCase : int = B""
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_UpperCAmelCase : List[Any] = 0
for raw in iterator:
acc += raw
if stream and len(a_ ) < chunk_len:
_UpperCAmelCase : List[Any] = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(a_ ) >= chunk_len:
# We are flushing the accumulator
_UpperCAmelCase : List[str] = (_stride_left, stride_right)
_UpperCAmelCase : Union[str, Any] = {"raw": acc[:chunk_len], "stride": stride}
if stream:
_UpperCAmelCase : Any = False
yield item
_UpperCAmelCase : List[Any] = stride_left
_UpperCAmelCase : List[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(a_ ) > stride_left:
_UpperCAmelCase : Tuple = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
_UpperCAmelCase : Optional[int] = False
yield item
def __UpperCAmelCase ( a_: str, a_: int ):
    _UpperCAmelCase : Optional[int] = 2**24 # 16MB
try:
with subprocess.Popen(a_, stdout=subprocess.PIPE, bufsize=a_ ) as ffmpeg_process:
while True:
_UpperCAmelCase : Optional[Any] = ffmpeg_process.stdout.read(a_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 494
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
__a = logging.get_logger(__name__)
class A__ ( UpperCamelCase ):
"""simple docstring"""
def __init__( self : Any , *lowerCAmelCase__ : Tuple , **lowerCAmelCase__ : int ) -> None:
"""simple docstring"""
warnings.warn(
"The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use DeformableDetrImageProcessor instead." , lowerCAmelCase__ , )
super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ )
| 494
| 1
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : Dict = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
_lowerCAmelCase : int = dict(zip(A_ , range(len(A_ ) ) ) )
_lowerCAmelCase : int = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
_lowerCAmelCase : Any = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_6_0_0_0,
"return_attention_mask": False,
"do_normalize": True,
}
_lowerCAmelCase : int = tempfile.mkdtemp()
_lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , A_ )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A_ ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A_ ) + "\n" )
# load decoder from hub
_lowerCAmelCase : Union[str, Any] = "hf-internal-testing/ngram-beam-search-decoder"
def __magic_name__ ( self : Tuple , **A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : str = self.add_kwargs_tokens_map.copy()
kwargs.update(A_ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **A_ )
def __magic_name__ ( self : Tuple , **A_ : Tuple ):
'''simple docstring'''
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **A_ )
def __magic_name__ ( self : str , **A_ : Tuple ):
'''simple docstring'''
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **A_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : str = self.get_feature_extractor()
_lowerCAmelCase : Optional[Any] = self.get_decoder()
_lowerCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : int = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , A_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
_lowerCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(A_ , "include" ):
WavaVecaProcessorWithLM(
tokenizer=A_ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_decoder()
_lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCAmelCase : int = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : Tuple = feature_extractor(A_ , return_tensors="np" )
_lowerCAmelCase : Any = processor(A_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[str] = self.get_decoder()
_lowerCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCAmelCase : int = "This is a test string"
_lowerCAmelCase : List[str] = processor(text=A_ )
_lowerCAmelCase : int = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self : Optional[int] , A_ : Tuple=(2, 1_0, 1_6) , A_ : str=7_7 ):
'''simple docstring'''
np.random.seed(A_ )
return np.random.rand(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : List[str] = self.get_decoder()
_lowerCAmelCase : List[str] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCAmelCase : Tuple = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
_lowerCAmelCase : List[Any] = processor.decode(A_ )
_lowerCAmelCase : Any = decoder.decode_beams(A_ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def __magic_name__ ( self : Optional[int] , A_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : str = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : Any = self.get_decoder()
_lowerCAmelCase : Tuple = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCAmelCase : int = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
_lowerCAmelCase : int = processor.batch_decode(A_ )
else:
with get_context(A_ ).Pool() as pool:
_lowerCAmelCase : List[Any] = processor.batch_decode(A_ , A_ )
_lowerCAmelCase : List[Any] = list(A_ )
with get_context("fork" ).Pool() as p:
_lowerCAmelCase : Any = decoder.decode_beams_batch(A_ , A_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(A_ , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(A_ , decoded_processor.logit_score )
self.assertListEqual(A_ , decoded_processor.lm_score )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_feature_extractor()
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_decoder()
_lowerCAmelCase : int = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCAmelCase : List[Any] = self._get_dummy_logits()
_lowerCAmelCase : Optional[Any] = 1_5
_lowerCAmelCase : str = -20.0
_lowerCAmelCase : Any = -4.0
_lowerCAmelCase : int = processor.batch_decode(
A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCAmelCase : Tuple = decoded_processor_out.text
_lowerCAmelCase : Optional[Any] = list(A_ )
with get_context("fork" ).Pool() as pool:
_lowerCAmelCase : Any = decoder.decode_beams_batch(
A_ , A_ , beam_width=A_ , beam_prune_logp=A_ , token_min_logp=A_ , )
_lowerCAmelCase : List[Any] = [d[0][0] for d in decoded_decoder_out]
_lowerCAmelCase : Tuple = [d[0][2] for d in decoded_decoder_out]
_lowerCAmelCase : str = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , A_ )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , A_ , atol=1E-3 ) )
self.assertTrue(np.array_equal(A_ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , A_ , atol=1E-3 ) )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_feature_extractor()
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : int = self.get_decoder()
_lowerCAmelCase : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
_lowerCAmelCase : Any = self._get_dummy_logits()
_lowerCAmelCase : int = 2.0
_lowerCAmelCase : int = 5.0
_lowerCAmelCase : Tuple = -20.0
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : List[str] = processor.batch_decode(
A_ , alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
_lowerCAmelCase : List[Any] = decoded_processor_out.text
_lowerCAmelCase : Optional[int] = list(A_ )
decoder.reset_params(
alpha=A_ , beta=A_ , unk_score_offset=A_ , lm_score_boundary=A_ , )
with get_context("fork" ).Pool() as pool:
_lowerCAmelCase : List[Any] = decoder.decode_beams_batch(
A_ , A_ , )
_lowerCAmelCase : str = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(A_ , A_ )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , A_ )
_lowerCAmelCase : Any = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , A_ )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_lowerCAmelCase : Dict = processor.decoder.model_container[processor.decoder._model_key]
_lowerCAmelCase : str = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
_lowerCAmelCase : Any = os.listdir(A_ )
_lowerCAmelCase : List[str] = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(A_ , A_ )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase : List[str] = snapshot_download("hf-internal-testing/processor_with_lm" )
_lowerCAmelCase : str = WavaVecaProcessorWithLM.from_pretrained(A_ )
_lowerCAmelCase : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
_lowerCAmelCase : int = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
_lowerCAmelCase : Optional[Any] = os.listdir(A_ )
_lowerCAmelCase : Optional[Any] = os.listdir(A_ )
local_decoder_files.sort()
expected_decoder_files.sort()
        # test that the decoder from the hub and the local files in the cache are the same
self.assertListEqual(A_ , A_ )
def __magic_name__ ( self : Any ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_lowerCAmelCase : Optional[int] = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
_lowerCAmelCase : Any = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : Optional[int] = processor_wavaveca(A_ , return_tensors="np" )
_lowerCAmelCase : Optional[Any] = processor_auto(A_ , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 )
_lowerCAmelCase : Any = self._get_dummy_logits()
_lowerCAmelCase : Any = processor_wavaveca.batch_decode(A_ )
_lowerCAmelCase : Union[str, Any] = processor_auto.batch_decode(A_ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __magic_name__ ( self : int ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : str = self.get_decoder()
_lowerCAmelCase : Any = WavaVecaProcessorWithLM(tokenizer=A_ , feature_extractor=A_ , decoder=A_ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def __magic_name__ ( A_ : Union[str, Any] , A_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Any = [d[key] for d in offsets]
return retrieved_list
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase : Any = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_lowerCAmelCase : Dict = self._get_dummy_logits()[0]
_lowerCAmelCase : Union[str, Any] = processor.decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
_lowerCAmelCase : Union[str, Any] = self._get_dummy_logits()
_lowerCAmelCase : Union[str, Any] = processor.batch_decode(A_ , output_word_offsets=A_ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(A_ , A_ ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(A_ , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __magic_name__ ( self : int ):
'''simple docstring'''
import torch
_lowerCAmelCase : Any = load_dataset("common_voice" , "en" , split="train" , streaming=A_ )
_lowerCAmelCase : Optional[int] = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
_lowerCAmelCase : Union[str, Any] = iter(A_ )
_lowerCAmelCase : Optional[int] = next(A_ )
_lowerCAmelCase : str = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
_lowerCAmelCase : List[str] = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
_lowerCAmelCase : List[Any] = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
_lowerCAmelCase : List[str] = model(A_ ).logits.cpu().numpy()
_lowerCAmelCase : List[Any] = processor.decode(logits[0] , output_word_offsets=A_ )
_lowerCAmelCase : Union[str, Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
_lowerCAmelCase : Optional[Any] = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
_lowerCAmelCase : Optional[int] = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(A_ , "word" ) ) , A_ )
self.assertEqual(" ".join(self.get_from_offsets(A_ , "word" ) ) , output.text )
# output times
_lowerCAmelCase : Dict = torch.tensor(self.get_from_offsets(A_ , "start_time" ) )
_lowerCAmelCase : Tuple = torch.tensor(self.get_from_offsets(A_ , "end_time" ) )
# fmt: off
_lowerCAmelCase : List[str] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] )
_lowerCAmelCase : Optional[Any] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
self.assertTrue(torch.allclose(A_ , A_ , atol=0.01 ) )
| 503
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ ( A ):
"""simple docstring"""
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A_ , "embed_dim" ) )
self.parent.assertTrue(hasattr(A_ , "num_heads" ) )
class A__ :
"""simple docstring"""
def __init__( self : Tuple , A_ : int , A_ : Dict=1_3 , A_ : int=6_4 , A_ : str=3 , A_ : Optional[int]=[1_6, 4_8, 9_6] , A_ : int=[1, 3, 6] , A_ : Optional[int]=[1, 2, 1_0] , A_ : Any=[7, 3, 3] , A_ : Tuple=[4, 2, 2] , A_ : str=[2, 1, 1] , A_ : Optional[Any]=[2, 2, 2] , A_ : Union[str, Any]=[False, False, True] , A_ : Union[str, Any]=[0.0, 0.0, 0.0] , A_ : Any=0.02 , A_ : Optional[int]=1E-12 , A_ : str=True , A_ : List[Any]=True , A_ : Union[str, Any]=2 , ):
'''simple docstring'''
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : int = batch_size
_lowerCAmelCase : Union[str, Any] = image_size
_lowerCAmelCase : Optional[Any] = patch_sizes
_lowerCAmelCase : Optional[int] = patch_stride
_lowerCAmelCase : List[str] = patch_padding
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : Dict = num_labels
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = num_heads
_lowerCAmelCase : Union[str, Any] = stride_kv
_lowerCAmelCase : Tuple = depth
_lowerCAmelCase : List[str] = cls_token
_lowerCAmelCase : Tuple = attention_drop_rate
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Optional[int] = layer_norm_eps
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : int = None
if self.use_labels:
# create a random int32 tensor of given shape
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Union[str, Any] , A_ : str , A_ : Optional[int] , A_ : str ):
'''simple docstring'''
_lowerCAmelCase : str = TFCvtModel(config=A_ )
_lowerCAmelCase : str = model(A_ , training=A_ )
_lowerCAmelCase : List[Any] = (self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase : Any = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_lowerCAmelCase : Union[str, Any] = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_lowerCAmelCase : List[Any] = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def __magic_name__ ( self : Any , A_ : Tuple , A_ : int , A_ : Any ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : Optional[Any] = TFCvtForImageClassification(A_ )
_lowerCAmelCase : Optional[Any] = model(A_ , labels=A_ , training=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = config_and_inputs
_lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class A__ ( A , A , unittest.TestCase ):
"""simple docstring"""
_lowercase : Any = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_lowercase : Tuple = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
_lowercase : Optional[int] = False
_lowercase : Any = False
_lowercase : int = False
_lowercase : int = False
_lowercase : Tuple = False
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = TFCvtModelTester(self )
_lowerCAmelCase : Optional[Any] = TFCvtConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=3_7 )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="Cvt does not output attentions" )
def __magic_name__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not use inputs_embeds" )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="Cvt does not support input and output embeddings" )
def __magic_name__ ( self : int ):
'''simple docstring'''
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0 , reason="TF does not support backprop for grouped convolutions on CPU." , )
@slow
def __magic_name__ ( self : Dict ):
'''simple docstring'''
super().test_keras_fit()
@unittest.skip(reason="Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8" )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tf.keras.mixed_precision.Policy("mixed_float16" )
tf.keras.mixed_precision.set_global_policy(A_ )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("float32" )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(A_ )
_lowerCAmelCase : Optional[int] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : List[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : Union[str, Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A_ )
def __magic_name__ ( self : int ):
'''simple docstring'''
def check_hidden_states_output(A_ : Any , A_ : Any , A_ : Dict ):
_lowerCAmelCase : Tuple = model_class(A_ )
_lowerCAmelCase : List[Any] = model(**self._prepare_for_class(A_ , A_ ) )
_lowerCAmelCase : Tuple = outputs.hidden_states
_lowerCAmelCase : Optional[Any] = len(self.model_tester.depth )
self.assertEqual(len(A_ ) , A_ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Union[str, Any] = True
check_hidden_states_output(A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase : List[str] = True
check_hidden_states_output(A_ , A_ , A_ )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def __magic_name__ ( self : Dict ):
'''simple docstring'''
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[Any] = TFCvtModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _snake_case ( ) -> Dict:
"""simple docstring"""
_lowerCAmelCase : int = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __magic_name__ ( self : str ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def __magic_name__ ( self : int ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase : int = self.default_image_processor
_lowerCAmelCase : Optional[Any] = prepare_img()
_lowerCAmelCase : Optional[Any] = image_processor(images=A_ , return_tensors="tf" )
# forward pass
_lowerCAmelCase : str = model(**A_ )
# verify the logits
_lowerCAmelCase : List[str] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A_ )
_lowerCAmelCase : Any = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , A_ , atol=1E-4 ) )
| 503
| 1
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=13 , __UpperCAmelCase=7 , __UpperCAmelCase=6 , __UpperCAmelCase=17 , __UpperCAmelCase=23 , __UpperCAmelCase=11 , __UpperCAmelCase=True , ):
'''simple docstring'''
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = act_dim
__lowerCamelCase = state_dim
__lowerCamelCase = hidden_size
__lowerCamelCase = max_length
__lowerCamelCase = is_training
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, self.state_dim) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, self.act_dim) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCamelCase = floats_tensor((self.batch_size, self.seq_length, 1) )
__lowerCamelCase = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1000 )
__lowerCamelCase = random_attention_mask((self.batch_size, self.seq_length) )
__lowerCamelCase = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def lowerCamelCase ( self ):
'''simple docstring'''
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ):
'''simple docstring'''
__lowerCamelCase = DecisionTransformerModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCamelCase = model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
self.parent.assertEqual(result.state_preds.shape , states.shape )
self.parent.assertEqual(result.action_preds.shape , actions.shape )
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape )
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length * 3 as there are 3 modalities: states, returns and actions
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
__lowerCamelCase = {
"""states""": states,
"""actions""": actions,
"""rewards""": rewards,
"""returns_to_go""": returns_to_go,
"""timesteps""": timesteps,
"""attention_mask""": attention_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ):
lowerCAmelCase__ = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCAmelCase__ = ()
lowerCAmelCase__ = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
    # Ignore a failing test from GenerationTesterMixin, as the model does not use input_ids
lowerCAmelCase__ = False
    # Ignore failing tests from ModelTesterMixin, as the model does not implement these features
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = DecisionTransformerModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = DecisionTransformerModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase = model_class(lowerCAmelCase_ )
__lowerCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase = [*signature.parameters.keys()]
__lowerCamelCase = [
"""states""",
"""actions""",
"""rewards""",
"""returns_to_go""",
"""timesteps""",
"""attention_mask""",
]
self.assertListEqual(arg_names[: len(lowerCAmelCase_ )] , lowerCAmelCase_ )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = 2 # number of steps of autoregressive prediction we will perform
__lowerCamelCase = 10 # defined by the RL environment, may be normalized
__lowerCamelCase = DecisionTransformerModel.from_pretrained('''edbeeching/decision-transformer-gym-hopper-expert''' )
__lowerCamelCase = model.to(lowerCAmelCase_ )
__lowerCamelCase = model.config
torch.manual_seed(0 )
__lowerCamelCase = torch.randn(1 , 1 , config.state_dim ).to(device=lowerCAmelCase_ , dtype=torch.floataa ) # env.reset()
__lowerCamelCase = torch.tensor(
[[0.242_793, -0.28_693_074, 0.8_742_613], [0.67_815_274, -0.08_101_085, -0.12_952_147]] , device=lowerCAmelCase_ )
__lowerCamelCase = torch.tensor(lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=torch.floataa ).reshape(1 , 1 , 1 )
__lowerCamelCase = state
__lowerCamelCase = torch.zeros(1 , 0 , config.act_dim , device=lowerCAmelCase_ , dtype=torch.floataa )
__lowerCamelCase = torch.zeros(1 , 0 , device=lowerCAmelCase_ , dtype=torch.floataa )
__lowerCamelCase = torch.tensor(0 , device=lowerCAmelCase_ , dtype=torch.long ).reshape(1 , 1 )
for step in range(lowerCAmelCase_ ):
__lowerCamelCase = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=lowerCAmelCase_ )] , dim=1 )
__lowerCamelCase = torch.cat([rewards, torch.zeros(1 , 1 , device=lowerCAmelCase_ )] , dim=1 )
__lowerCamelCase = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device )
with torch.no_grad():
__lowerCamelCase = model(
states=lowerCAmelCase_ , actions=lowerCAmelCase_ , rewards=lowerCAmelCase_ , returns_to_go=lowerCAmelCase_ , timesteps=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , return_dict=lowerCAmelCase_ , )
self.assertEqual(action_pred.shape , actions.shape )
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1E-4 ) )
__lowerCamelCase = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim ).to(device=lowerCAmelCase_ , dtype=torch.floataa ),
1.0,
False,
{},
)
__lowerCamelCase = action_pred[0, -1]
__lowerCamelCase = torch.cat([states, state] , dim=1 )
__lowerCamelCase = returns_to_go[0, -1] - reward
__lowerCamelCase = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 )
__lowerCamelCase = torch.cat(
[timesteps, torch.ones((1, 1) , device=lowerCAmelCase_ , dtype=torch.long ) * (step + 1)] , dim=1 )
| 175
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 42
__snake_case = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.26.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        StableDiffusionDepth2ImgPipeline,
        StableDiffusionDiffEditPipeline,
        StableDiffusionPix2PixZeroPipeline,
    )
else:
    from .pipeline_stable_diffusion_depth2img import StableDiffusionDepth2ImgPipeline
    from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
    from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('>=', '0.0.12')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
    from .pipeline_onnx_stable_diffusion_img2img import OnnxStableDiffusionImg2ImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
    class FlaxStableDiffusionPipelineOutput(BaseOutput):
        """
        Output class for Flax-based Stable Diffusion pipelines.
        """

        images: np.ndarray
        nsfw_content_detected: List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
    from .pipeline_flax_stable_diffusion_img2img import FlaxStableDiffusionImg2ImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
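# The repeated try/except blocks above implement the optional-dependency pattern: probe for
# the required packages, and on failure export a placeholder so that the import itself keeps
# working and only fails, with a useful message, at first use. A minimal self-contained
# sketch of the same idea follows; the exception class and dummy object here are illustrative
# stand-ins, not the diffusers implementation.
import importlib.util


class OptionalDependencyNotAvailable(BaseException):
    pass


def is_torch_available() -> bool:
    return importlib.util.find_spec("torch") is not None


class _DummyPipeline:
    """Placeholder that defers the failure until the class is actually instantiated."""

    def __init__(self, *args, **kwargs):
        raise ImportError("This pipeline requires torch; please `pip install torch`.")


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    StableDiffusionPipeline = _DummyPipeline  # import still succeeds
else:
    from diffusers import StableDiffusionPipeline  # the real import (requires torch)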
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"""files""" , [
["""full:README.md""", """dataset_infos.json"""],
["""empty:README.md""", """dataset_infos.json"""],
["""dataset_infos.json"""],
["""full:README.md"""],
] , )
def test_dataset_infos_dict_from_directory(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"""dataset_info""" , [
DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
] , )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo", citation="bar", homepage="https://foo.bar", license="CC0",
        features=Features({"a": Value("int32")}), post_processed={}, supervised_keys=(),
        task_templates=[], builder_name="builder", config_name="config", version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}], download_checksums={}, download_size=1337,
        post_processing_size=442, dataset_size=1234, size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"""dataset_infos_dict""" , [
DatasetInfosDict(),
DatasetInfosDict({"""default""": DatasetInfo()} ),
DatasetInfosDict({"""my_config_name""": DatasetInfo()} ),
DatasetInfosDict(
{
"""default""": DatasetInfo(
                description="foo", features=Features({"a": Value("int32")}), builder_name="builder",
                config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=42,
} ),
DatasetInfosDict(
{
"""v1""": DatasetInfo(dataset_size=4_2 ),
"""v2""": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
] , )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
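# These tests boil down to one round-trip invariant: whatever write_to_directory emits,
# from_directory must reconstruct. A minimal standalone sketch of that contract (assumes a
# `datasets` install and a writable temporary directory; values are illustrative):
import tempfile

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo

with tempfile.TemporaryDirectory() as tmp_dir:
    info = DatasetInfo(description="demo", features=Features({"a": Value("int32")}), dataset_size=42)
    info.write_to_directory(tmp_dir)
    assert DatasetInfo.from_directory(tmp_dir) == info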
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        """
        Reproduce the reference loss of google/mt5-small on a tiny example.
        """
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids

        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()

        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
'''simple docstring'''
from __future__ import annotations
def mean(nums: list) -> float:
    """Return the arithmetic mean of a list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
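# _LazyModule replaces the module object in sys.modules so that the submodules and symbols
# listed in _import_structure are only imported on first attribute access. A stripped-down
# sketch of that mechanism follows; it illustrates the idea and is not the actual
# transformers._LazyModule implementation.
import importlib
import sys
from types import ModuleType


class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name: str):
        try:
            submodule = self._symbol_to_module[name]
        except KeyError:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        module = importlib.import_module(f"{self.__name__}.{submodule}")
        value = getattr(module, name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value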
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
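# A quick usage sketch showing how the config and its ONNX axis mapping fit together
# (assumes a transformers install; the vocab size below is illustrative):
config = CamembertConfig(vocab_size=32005, hidden_size=768)
onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])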
'''simple docstring'''
def or_gate(input_a: int, input_b: int) -> int:
    """OR gate: 1 if at least one input is 1, else 0."""
    return int((input_a, input_b).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
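# The tuple-count trick above generalizes to the other basic gates. For instance, an AND
# gate (a companion sketch, not part of the original file) just checks that no input is 0:
def and_gate(input_a: int, input_b: int) -> int:
    """AND gate: 1 only if both inputs are 1."""
    return int((input_a, input_b).count(0) == 0)


assert and_gate(1, 1) == 1 and and_gate(1, 0) == 0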
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the arguments for the TPU launch helper."""
    parser = ArgumentParser(
        description=(
            'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes'
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('--num_cores' , type=int , default=1 , help='Number of TPU cores to use (1 or 8).' )
    # positional
    parser.add_argument(
        'training_script' , type=str , help=(
            'The full path to the single TPU training '
            'program/script to be launched in parallel, '
            'followed by all the arguments for the '
            'training script'
        ) , )
    # rest from the training program
    parser.add_argument('training_script_args' , nargs=REMAINDER )
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own arguments.
    sys.argv = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
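# Typical invocation (values illustrative): `python xla_spawn.py --num_cores 8 train.py --lr 3e-5`;
# everything after the script path is forwarded verbatim. The sys.argv patch above can be
# sanity-checked in isolation:
import sys

args_training_script = "train.py"
args_training_script_args = ["--lr", "3e-5"]
num_cores = 8

sys.argv = [args_training_script] + args_training_script_args + ["--tpu_num_cores", str(num_cores)]
assert sys.argv == ["train.py", "--lr", "3e-5", "--tpu_num_cores", "8"]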
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    """A single set of features of data; property names mirror the model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir: str, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        """Convert a list of `InputExample` into model-ready `InputFeatures`."""
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        """PyTorch dataset that caches converted features on disk."""

        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file.
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        """TensorFlow dataset built from a generator over the converted features."""

        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self) -> int:
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
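# A hedged end-to-end sketch of how these pieces compose. The ToyTask subclass and its
# in-memory sentences are invented for illustration; real tasks read CoNLL-style files
# (assumes a transformers install and network access for the tokenizer download).
from transformers import AutoTokenizer


class ToyTask(TokenClassificationTask):
    @staticmethod
    def read_examples_from_file(data_dir, mode):
        return [InputExample(guid="toy-1", words=["Hello", "Paris"], labels=["O", "B-LOC"])]

    @staticmethod
    def get_labels(path):
        return ["O", "B-LOC"]


tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
features = ToyTask.convert_examples_to_features(
    ToyTask.read_examples_from_file(".", Split.train),
    ToyTask.get_labels("."),
    16,
    tokenizer,
)
print(features[0].input_ids)  # token ids padded to length 16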
from __future__ import annotations
class Matrix:
    """A matrix of ints/floats with the classic linear-algebra operations."""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    # MATRIX INFORMATION
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    # MATRIX MANIPULATION
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    # MATRIX OPERATIONS
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
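# A short usage example for the Matrix class above (values chosen so the
# determinant is non-zero; note that scalar multiplication truncates to int):
m = Matrix([[1, 2], [3, 4]])
print(m.determinant())                     # -2
print(m.inverse())                         # adjugate scaled by 1/determinant
print((m ** 2).rows)                       # [[7, 10], [15, 22]]
print((m * m.identity()).rows == m.rows)   # True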
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel state dict as an original-TF1 checkpoint."""
    tensors_to_transpose = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')

    var_map = (
        ('''layer.''', '''layer_'''),
        ('''word_embeddings.weight''', '''word_embeddings'''),
        ('''position_embeddings.weight''', '''position_embeddings'''),
        ('''token_type_embeddings.weight''', '''token_type_embeddings'''),
        ('''.''', '''/'''),
        ('''LayerNorm/weight''', '''LayerNorm/gamma'''),
        ('''LayerNorm/bias''', '''LayerNorm/beta'''),
        ('''weight''', '''kernel'''),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace('''-''', '''_''') + '''.ckpt'''))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument('''--model_name''', type=str, required=True, help='''model name e.g. bert-base-uncased''')
    parser.add_argument(
        '''--cache_dir''', type=str, default=None, required=False, help='''Directory containing pytorch model''')
    parser.add_argument('''--pytorch_model_path''', type=str, required=True, help='''/path/to/<pytorch-model-name>.bin''')
    parser.add_argument('''--tf_cache_dir''', type=str, required=True, help='''Directory in which to save tensorflow model''')
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
graph = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Run a BFS from the source vertex, recording each node's parent."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the BFS path from the source to ``target_vertex``, e.g. "G->C->A"."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
        return self.shortest_path(target_vertex_parent) + f"""->{target_vertex}"""


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))  # raises ValueError: no such reachable vertex
"""simple docstring"""
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
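# All of these classes share the SchedulerMixin interface, which is what makes them
# interchangeable inside a pipeline: the core loop is always set_timesteps() followed by
# repeated step() calls. A hedged sketch of that contract (the toy zero "denoiser" and
# the tensor shapes are illustrative, not a real model):
import torch
from diffusers.schedulers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)

sample = torch.randn(1, 3, 64, 64)  # start from pure noise
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # a real UNet prediction goes here
    sample = scheduler.step(model_output, t, sample).prev_sample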
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4
        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected (height, width) after resizing, mirroring the
        shortest-edge logic of OneFormerImageProcessor.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    feature_extraction_class = image_processing_class
    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)
@property
    def image_processor_dict(self):
return self.image_processing_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))
    def test_batch_feature(self):
pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], ["semantic"], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processing_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processing_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processor(
            image_inputs, ["semantic"] * len(image_inputs), return_tensors="pt"
        ).pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processing_tester.batch_size,
                self.image_processing_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
def snake_case_ ( self : Optional[int] , A : Tuple=False , A : Optional[Any]=False , A : int="np" ):
_UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCAmelCase : List[str] = self.image_processing_tester.num_labels
_UpperCAmelCase : Any = None
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=A )
if with_segmentation_maps:
_UpperCAmelCase : Union[str, Any] = num_labels
if is_instance_map:
_UpperCAmelCase : Optional[int] = list(range(A ) ) * 2
_UpperCAmelCase : Union[str, Any] = dict(enumerate(A ) )
_UpperCAmelCase : Tuple = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCAmelCase : Optional[int] = [Image.fromarray(A ) for annotation in annotations]
_UpperCAmelCase : int = image_processor(
A , ["semantic"] * len(A ) , A , return_tensors="pt" , instance_id_to_semantic_id=A , pad_and_return_pixel_mask=A , )
return inputs
    def test_init_without_params(self):
pass
    def test_call_with_segmentation_maps(self):
        def common(is_instance_map=False, segmentation_type=None):
            inputs = self.comm_get_image_processor_inputs(
                with_segmentation_maps=True, is_instance_map=is_instance_map, segmentation_type=segmentation_type
            )

            mask_labels = inputs["mask_labels"]
            class_labels = inputs["class_labels"]
            pixel_values = inputs["pixel_values"]
            text_inputs = inputs["text_inputs"]

            # check the batch_size
            for mask_label, class_label, text_input in zip(mask_labels, class_labels, text_inputs):
                self.assertEqual(mask_label.shape[0], class_label.shape[0])
                # this ensure padding has happened
                self.assertEqual(mask_label.shape[1:], pixel_values.shape[2:])
                self.assertEqual(len(text_input), self.image_processing_tester.num_text)

        common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type="pil")
        common(is_instance_map=True, segmentation_type="pil")
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1  # low indices
        fake_binary_mask[1, :15] = 1  # high indices
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes, max_seq_length=77, task_seq_length=77, class_info_file="ade20k_panoptic.json", num_text=self.image_processing_tester.num_text, repo_path="shi-labs/oneformer_demo", )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width) )
| 289
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B'''\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'''
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, '''sentencepiece_model_pb2''', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
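# Illustrative usage sketch (not part of the generated file): the builder calls above
# inject message classes such as `ModelProto` into this module's globals, so a serialized
# SentencePiece model file can be parsed with them. `spm_path` is a hypothetical path.
def _load_model_proto(spm_path):
    proto = ModelProto()  # noqa: F821 -- registered in globals() by the builder above
    with open(spm_path, "rb") as f:
        proto.ParseFromString(f.read())
    return proto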
| 226
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
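# Minimal usage sketch (illustration only, mirroring how test_torch_metrics drives the
# helpers above): `gather_for_metrics` all-gathers the per-process shards and drops the
# duplicated tail samples, so the gathered tensors match the true dataset length.
def _example_run(num_samples=82):
    accelerator = Accelerator()
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert len(logits) == num_samples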
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()
    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
            preds = outputs.logits.argmax(dim=-1)
            references = batch["labels"]
            preds, references = accelerator.gather_for_metrics((preds, references))
            metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
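# Launch note (an illustration, not part of the original script): the gather checks above
# are only meaningful when more than one process participates, which is typically arranged
# with the accelerate CLI rather than plain `python`, e.g.:
#
#   accelerate launch --num_processes 2 <this_script>.py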
| 226
| 1
|
import os
def solution():
    """Returns the greatest product of four adjacent numbers (horizontally,
    vertically, or diagonally) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
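# Worked mini-example (illustration only, not part of the original solution): the same
# four-in-a-row product scan on a small grid; the best horizontal window in the first row
# below is 2 * 3 * 4 * 5 = 120.
def _toy_horizontal_max(grid):
    best = 0
    for row in grid:
        for j in range(len(row) - 3):
            best = max(best, row[j] * row[j + 1] * row[j + 2] * row[j + 3])
    return best


assert _toy_horizontal_max([[1, 2, 3, 4, 5], [9, 1, 1, 1, 1]]) == 120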
if __name__ == "__main__":
print(solution())
| 0
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = TFRoFormerModel(config=config)

        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]

        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )

    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False
    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)

        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
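    # A minimal sketch (an illustrative assumption, not the library's implementation) of
    # the rotary update exercised above: each (even, odd) feature pair is rotated by a
    # position-dependent angle, x' = x * cos + rotate_every_two(x) * sin.
    @staticmethod
    def _toy_apply_rotary(x, sin, cos):
        x_even, x_odd = x[..., ::2], x[..., 1::2]
        # rotate_every_two: (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...)
        rotated = tf.reshape(tf.stack([-x_odd, x_even], axis=-1), tf.shape(x))
        return x * cos + rotated * sin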
| 0
| 1
|
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric score."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=3, every_n_epochs=1, )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
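# Minimal usage sketch (illustration only; the helper itself is not in the original
# file): wiring the two factory functions above into a pl.Trainer callback list.
def build_callbacks(output_dir, metric, patience):
    return [
        get_checkpoint_callback(output_dir, metric),
        get_early_stopping_callback(metric, patience),
    ]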
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 716
|
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num: int, den: int) -> bool:
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len: int) -> list[str]:
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        num += 1
        den = 10
    return solutions
def solution(n: int = 2) -> int:
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
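# Worked example (illustration only): 49/98 is the classic digit-cancelling fraction,
# since naively "cancelling" the nines gives 4/8, which happens to equal 49/98.
assert is_digit_cancelling(49, 98)
assert Fraction(49, 98) == Fraction(4, 8)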
if __name__ == "__main__":
print(solution())
| 18
| 0
|
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, embed_dim=16, word_embed_proj_dim=16, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=False, **self.config_updates, )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size
                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size, hidden_size=24, num_hidden_layers=2, num_attention_heads=2, ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class OPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"

    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ] )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
        return [
            "Today is a beautiful day and I want",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]

    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)

    def test_batch_generation(self):
        model_id = "facebook/opt-350m"
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)
        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64) )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])

    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"
        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]
        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids
            generated_ids = model.generate(input_ids, max_length=10)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 413
|
def remove_digit(num: int) -> int:
    """Returns the biggest possible result that can be achieved by removing
    one digit from the given number."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 413
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_longformer': [
'LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LongformerConfig',
'LongformerOnnxConfig',
],
'tokenization_longformer': ['LongformerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
'LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'LongformerForMaskedLM',
'LongformerForMultipleChoice',
'LongformerForQuestionAnswering',
'LongformerForSequenceClassification',
'LongformerForTokenClassification',
'LongformerModel',
'LongformerPreTrainedModel',
'LongformerSelfAttention',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
'TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLongformerForMaskedLM',
'TFLongformerForMultipleChoice',
'TFLongformerForQuestionAnswering',
'TFLongformerForSequenceClassification',
'TFLongformerForTokenClassification',
'TFLongformerModel',
'TFLongformerPreTrainedModel',
'TFLongformerSelfAttention',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 80
|
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4
        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch
        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}
        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"], )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"], )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2, )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer, )

        # start training
        trainer.train()
| 80
| 1
|
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)
    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)
def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
lowerCAmelCase :Tuple = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase :List[str] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase )
lowerCAmelCase :List[str] = -1
lowerCAmelCase :Tuple = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
lowerCAmelCase :Optional[Any] = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
lowerCAmelCase :Dict = greedy_ids[:, input_ids.shape[1] :]
lowerCAmelCase :int = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
lowerCAmelCase :int = TextStreamer(UpperCAmelCase , skip_prompt=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
lowerCAmelCase :str = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
lowerCAmelCase :int = AutoTokenizer.from_pretrained('distilgpt2' )
lowerCAmelCase :Tuple = AutoModelForCausalLM.from_pretrained('distilgpt2' ).to(UpperCAmelCase )
lowerCAmelCase :Any = -1
lowerCAmelCase :List[Any] = torch.ones((1, 5) , device=UpperCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
lowerCAmelCase :Optional[int] = TextStreamer(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=1 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
lowerCAmelCase :Dict = cs.out[:-1] # Remove the final "\n"
lowerCAmelCase :Union[str, Any] = tokenizer(UpperCAmelCase , return_tensors='pt' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def UpperCAmelCase__ ( self : str ) -> int:
lowerCAmelCase :List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2' )
lowerCAmelCase :List[Any] = AutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-random-gpt2' ).to(UpperCAmelCase )
lowerCAmelCase :Union[str, Any] = -1
lowerCAmelCase :int = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
lowerCAmelCase :Tuple = TextIteratorStreamer(UpperCAmelCase , timeout=0.0_0_1 )
lowerCAmelCase :Optional[Any] = {'input_ids': input_ids, 'max_new_tokens': 10, 'do_sample': False, 'streamer': streamer}
lowerCAmelCase :int = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase ):
lowerCAmelCase :str = ''
for new_text in streamer:
streamer_text += new_text
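# --- Hedged usage sketch (not part of the test suite above): consuming a
# TextIteratorStreamer from application code. The tiny checkpoint name is reused
# from the tests; any causal LM checkpoint should behave the same way.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer


def stream_completion(prompt, model_name="hf-internal-testing/tiny-random-gpt2"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    inputs = tokenizer(prompt, return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
    # generate() blocks, so it runs in a background thread while the stream is consumed
    thread = Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer})
    thread.start()
    text = ""
    for chunk in streamer:  # each chunk is a decoded text fragment
        text += chunk
    thread.join()
    return text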
"""simple docstring"""
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
__SCREAMING_SNAKE_CASE = {
'n_samples': 64,
'horizon': 32,
'num_inference_steps': 20,
'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
'scale_grad_by_std': True,
'scale': 0.1,
'eta': 0.0,
't_grad_cutoff': 2,
'device': 'cpu',
}
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = 'hopper-medium-v2'
__SCREAMING_SNAKE_CASE = gym.make(env_name)
__SCREAMING_SNAKE_CASE = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
__SCREAMING_SNAKE_CASE = env.reset()
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 10_00
__SCREAMING_SNAKE_CASE = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
__SCREAMING_SNAKE_CASE = pipeline(obs, planning_horizon=32)
# execute action in environment
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = env.step(denorm_actions)
__SCREAMING_SNAKE_CASE = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F"""Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"""
F""" {total_score}"""
)
# save observations for rendering
rollout.append(next_observation.copy())
__SCREAMING_SNAKE_CASE = next_observation
except KeyboardInterrupt:
pass
print(F"""Total reward: {total_reward}""")
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
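# --- Hedged usage sketch (not part of the tests above): converting a predicted
# depth map to an image, mirroring the usual DPT post-processing. The output
# path and target size are assumptions.
def save_depth_map(predicted_depth, size=(480, 640), path="depth.png"):
    import torch
    from PIL import Image

    # resize the (1, H, W) depth map to the requested size
    depth = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1), size=size, mode="bicubic", align_corners=False
    ).squeeze()
    depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
    Image.fromarray(depth.cpu().numpy().astype("uint8")).save(path)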
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )

        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )

        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )

            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )

            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.0.1", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")

                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.0.1",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
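# --- Hedged usage sketch for notebook_launcher above; `training_loop` is a
# hypothetical user function that builds its own Accelerator, model, and data.
def training_loop(mixed_precision="no"):
    pass  # set up Accelerator, model, dataloaders and train here


# notebook_launcher(training_loop, args=("fp16",), num_processes=2)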
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = """▁"""
UpperCamelCase = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
UpperCamelCase = {
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
UpperCamelCase = {"""vinai/bartpho-syllable""": 1024}
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : List[str] = VOCAB_FILES_NAMES
A__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
A__ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A__ : str = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
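# --- Hedged usage sketch: loading the tokenizer above from the checkpoint named
# in PRETRAINED_VOCAB_FILES_MAP; the sample sentence is an arbitrary assumption.
# tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
# ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
# print(tokenizer.decode(ids))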
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = "\nHuman: <<task>>\n\nAssistant: "


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Download and cache the prompt template, or return the prompt itself if one was passed."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
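# --- Hedged usage sketch: fetching the default "run" prompt template; the agent
# name is only a user-agent tag and the value here is an assumption.
# template = download_prompt(None, agent_name="my-agent", mode="run")
# print(template.replace("<<task>>", "Translate this sentence to French"))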
from arguments import InitializationArguments

from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser


# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
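# --- Hedged usage note: the script above is driven by InitializationArguments;
# the flag names below mirror its fields used here, and the values are placeholders.
#   python initialize_model.py --config_name gpt2-large \
#       --tokenizer_name codeparrot/codeparrot --model_name my-codeparrot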
import argparse
import os

import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints

from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder


MODEL = "base_with_context"
def load_notes_encoder(weights, model):
    model.token_embedder.weight = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"]))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_continuous_encoder(weights, model):
    model.input_proj.weight = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    for lyr_num, lyr in enumerate(model.encoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
        lyr.layer[0].SelfAttention.q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].SelfAttention.k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].SelfAttention.v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].SelfAttention.o.weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"])
        )

        lyr.layer[1].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[1].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))

    model.layer_norm.weight = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"]))
    return model
def load_decoder(weights, model):
    model.conditioning_emb[0].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T))
    model.conditioning_emb[2].weight = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T))
    model.position_encoding.weight = nn.Parameter(
        torch.FloatTensor(weights["Embed_0"]["embedding"]), requires_grad=False
    )
    model.continuous_inputs_projection.weight = nn.Parameter(
        torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T)
    )

    for lyr_num, lyr in enumerate(model.decoders):
        ly_weight = weights[f"layers_{lyr_num}"]
        lyr.layer[0].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"])
        )
        lyr.layer[0].FiLMLayer.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T)
        )

        attention_weights = ly_weight["self_attention"]
        lyr.layer[0].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[0].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[0].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[0].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))

        attention_weights = ly_weight["MultiHeadDotProductAttention_0"]
        lyr.layer[1].attention.to_q.weight = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T))
        lyr.layer[1].attention.to_k.weight = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T))
        lyr.layer[1].attention.to_v.weight = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T))
        lyr.layer[1].attention.to_out[0].weight = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T))
        lyr.layer[1].layer_norm.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"])
        )

        lyr.layer[2].layer_norm.weight = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"]))
        lyr.layer[2].film.scale_bias.weight = nn.Parameter(
            torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T)
        )
        lyr.layer[2].DenseReluDense.wi_0.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wi_1.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T))
        lyr.layer[2].DenseReluDense.wo.weight = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T))

    model.decoder_norm.weight = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"]))
    model.spec_out.weight = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T))
    return model
def main(args):
    t5_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    t5_checkpoint = jax.tree_util.tree_map(onp.array, t5_checkpoint)

    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]

    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)

    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"],
        vocab_size=synth_model.model.module.config.vocab_size,
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_context_length=synth_model.sequence_length["targets_context"],
        d_model=synth_model.model.module.config.emb_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
        num_layers=synth_model.model.module.config.num_encoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        feed_forward_proj="gated-gelu",
    )

    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims,
        targets_length=synth_model.sequence_length["targets_context"],
        max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time,
        d_model=synth_model.model.module.config.emb_dim,
        num_layers=synth_model.model.module.config.num_decoder_layers,
        num_heads=synth_model.model.module.config.num_heads,
        d_kv=synth_model.model.module.config.head_dim,
        d_ff=synth_model.model.module.config.mlp_dim,
        dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(t5_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(t5_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(t5_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder,
        continuous_encoder=continuous_encoder,
        decoder=decoder,
        scheduler=scheduler,
        melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
_lowerCamelCase : Any = parser.parse_args()
main(args)
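# --- Hedged usage note (paths are placeholders): the conversion script above is
# run from the command line, e.g.
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion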