| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 81-54k) | int64 (0-721) | string (lengths 91-41.9k) | int64 (0-699) | int64 (0-1) |
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        out_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        activation_fn: str = "geglu",
        norm_elementwise_affine: bool = True,
        double_self_attention: bool = True,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    cross_attention_dim=cross_attention_dim,
                    activation_fn=activation_fn,
                    attention_bias=attention_bias,
                    double_self_attention=double_self_attention,
                    norm_elementwise_affine=norm_elementwise_affine,
                )
                for _ in range(num_layers)
            ]
        )

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        class_labels=None,
        num_frames=1,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        # 1. Input
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                class_labels=class_labels,
            )

        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
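# Usage sketch (illustrative, not part of the original module): the forward pass
# expects video frames flattened into the batch dimension. Parameter values
# below are assumptions for the demo only.
#
#   model = TransformerTemporalModel(num_attention_heads=8, attention_head_dim=32, in_channels=256)
#   sample = torch.randn(2 * 4, 256, 16, 16)   # (batch * num_frames, C, H, W), batch=2, num_frames=4
#   out = model(sample, num_frames=4).sample   # same shape as `sample`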
| code_codestyle: 719 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
a : List[Any] = random.Random()
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : int=1.0 , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None ) ->Optional[Any]:
'''simple docstring'''
if rng is None:
a : Tuple = global_rng
a : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
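    # Sketch (added for illustration, not in the original test): the helper above
    # accepts per-utterance normalization of the form
    #
    #   x = np.asarray(speech, dtype=np.float32)
    #   normalized = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
    #
    # which drives the mean to ~0 and the variance to ~1 on the un-padded region.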
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[1][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
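# Quick usage sketch (illustrative, not part of the test file): the same
# extractor produces waveform inputs via `audio` and 80-bin log-mel targets via
# `audio_target`; the random waveform below is a placeholder.
#
#   extractor = SpeechT5FeatureExtractor()
#   wav = np.random.rand(16000).astype(np.float32)   # 1 s at 16 kHz
#   inputs = extractor(audio=wav, sampling_rate=16000, return_tensors="pt")          # (1, 16000)
#   targets = extractor(audio_target=wav, sampling_rate=16000, return_tensors="pt")  # (1, frames, 80)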
| style_context_codestyle: 31 | label: 0 |
from __future__ import annotations

import math


def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Returns the optimal value for the player on turn (maximizer or minimizer)."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
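# Worked example (added for illustration): for the scores used in main(),
# [90, 23, 6, 33, 21, 65, 123, 34423] with height = 3, the levels reduce as
#   depth 2 (max): [90, 33, 65, 34423]
#   depth 1 (min): [33, 65]
#   depth 0 (max): 65
# so minimax(0, 0, True, scores, height) prints "Optimal value : 65".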
| code_codestyle: 720 |
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] ) ->int:
'''simple docstring'''
a : int = {}
a : Union[str, Any] = tokenizer(example["content"] , truncation=_lowercase )["input_ids"]
a : Any = len(example["content"] ) / len(output["input_ids"] )
return output
a : int = HfArgumentParser(PretokenizationArguments)
a : Optional[int] = parser.parse_args()
if args.num_workers is None:
a : Tuple = multiprocessing.cpu_count()
a : Optional[int] = AutoTokenizer.from_pretrained(args.tokenizer_dir)
a : Dict = time.time()
a : Tuple = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
a : Dict = time.time()
a : Tuple = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
a : Tuple = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
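# Illustrative example (not part of the original script): the ratio stored by
# tokenize() is characters per token. For instance, with the GPT-2 tokenizer:
#
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   text = "def add(a, b):\n    return a + b\n"
#   ratio = len(text) / len(tok(text)["input_ids"])   # typically a few chars per token for code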
| style_context_codestyle: 31 | label: 0 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : Any ) ->Optional[Any]:
'''simple docstring'''
a : Union[str, Any] = ""
for i in table:
res += inp[i - 1]
return res
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->Tuple:
'''simple docstring'''
return data[1:] + data[0]
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple , _lowercase : List[str] ) ->Optional[Any]:
'''simple docstring'''
a : Any = ""
for i in range(len(_lowercase ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] , _lowercase : Dict ) ->Tuple:
'''simple docstring'''
a : Any = int("0b" + data[0] + data[-1] , 2 )
a : Optional[Any] = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : List[Any] , _lowercase : Dict , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ) ->List[str]:
'''simple docstring'''
a : Optional[Any] = message[:4]
a : str = message[4:]
a : Optional[int] = apply_table(_lowercase , _lowercase )
a : Optional[int] = xor(_lowercase , _lowercase )
a : Optional[Any] = apply_sbox(_lowercase , temp[:4] ) # noqa: E741
a : List[Any] = apply_sbox(_lowercase , temp[4:] )
a : int = "0" * (2 - len(_lowercase )) + l # noqa: E741
a : Union[str, Any] = "0" * (2 - len(_lowercase )) + r
a : Optional[Any] = apply_table(l + r , _lowercase )
a : List[str] = xor(_lowercase , _lowercase )
return temp + right
if __name__ == "__main__":
_A : str = input('''Enter 10 bit key: ''')
_A : Optional[Any] = input('''Enter 8 bit message: ''')
_A : Dict = [6, 3, 7, 4, 8, 5, 10, 9]
_A : List[Any] = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_A : Any = [2, 4, 3, 1]
_A : Optional[int] = [2, 6, 3, 1, 4, 8, 5, 7]
_A : List[str] = [4, 1, 3, 5, 7, 2, 8, 6]
_A : int = [4, 1, 2, 3, 2, 3, 4, 1]
_A : List[str] = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_A : Optional[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_A : List[str] = apply_table(key, paa_table)
_A : Dict = temp[:5]
_A : Tuple = temp[5:]
_A : List[Any] = left_shift(left)
_A : Tuple = left_shift(right)
_A : List[Any] = apply_table(left + right, pa_table)
_A : Union[str, Any] = left_shift(left)
_A : Dict = left_shift(right)
_A : Union[str, Any] = left_shift(left)
_A : int = left_shift(right)
_A : str = apply_table(left + right, pa_table)
# encryption
_A : List[str] = apply_table(message, IP)
_A : Optional[Any] = function(expansion, sa, sa, keya, temp)
_A : List[Any] = temp[4:] + temp[:4]
_A : Dict = function(expansion, sa, sa, keya, temp)
_A : Optional[Any] = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
_A : Optional[int] = apply_table(CT, IP)
_A : Dict = function(expansion, sa, sa, keya, temp)
_A : Optional[int] = temp[4:] + temp[:4]
_A : Dict = function(expansion, sa, sa, keya, temp)
_A : Dict = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
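    # Round-trip check (added for illustration): decryption applies the same
    # Feistel function with the subkeys in reverse order, so the recovered
    # plaintext must equal the original message for any valid key/message pair.
    assert PT == message, "S-DES round trip failed"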
| code_codestyle: 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a : Union[str, Any] = TaTokenizerFast
a : Union[str, Any] = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a : List[Any] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
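# Note (illustrative, not part of the module): with _LazyModule, submodules are
# imported on first attribute access, so for example
#
#   from transformers import MT5Config                     # loads only configuration_mt5
#   from transformers import MT5ForConditionalGeneration   # triggers the modeling_mt5 import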
| style_context_codestyle: 31 | label: 0 |
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a : List[str] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : Dict , _lowercase : Dict ) ->List[Any]:
'''simple docstring'''
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : Tuple , _lowercase : str ) ->List[str]:
'''simple docstring'''
a : List[Any] = to_pil_image(_SCREAMING_SNAKE_CASE )
a, a : Dict = pil_image.size
a : Optional[int] = pytesseract.image_to_data(_SCREAMING_SNAKE_CASE , lang=_SCREAMING_SNAKE_CASE , output_type="dict" , config=_SCREAMING_SNAKE_CASE )
a, a, a, a, a : Dict = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
a : Union[str, Any] = [idx for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if not word.strip()]
a : Optional[int] = [word for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
a : Tuple = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
a : Any = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
a : int = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
a : Optional[int] = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
a : List[str] = []
for x, y, w, h in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
a : Union[str, Any] = [x, y, x + w, y + h]
actual_boxes.append(_SCREAMING_SNAKE_CASE )
# finally, normalize the bounding boxes
a : str = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
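# Worked example (added for illustration): normalize_box scales pixel boxes to
# the 0-1000 grid that LayoutLM-style models expect. For a 300x600 image:
#
#   normalize_box([15, 30, 45, 60], width=300, height=600) -> [50, 50, 150, 100]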
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
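# Usage sketch (illustrative; relies on the class as restored above):
#
#   processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#   encoding = processor(images=pil_image, return_tensors="pt")
#   encoding["pixel_values"].shape        # (1, 3, 224, 224)
#   encoding["words"], encoding["boxes"]  # OCR words and 0-1000 normalized boxes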
| code_codestyle: 700 |
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 10 , _lowercase : int = 1000 , _lowercase : bool = True ) ->int:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int , _lowercase : int ) ->None:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase ) and isinstance(_lowercase , _lowercase )
), 'argument values must be type of "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
raise ValueError(
"guess value must be within the range of lower and higher value" )
def answer(_lowercase : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
a : Optional[Any] = lower
a : List[Any] = higher
a : Tuple = []
while True:
a : List[Any] = get_avg(_lowercase , _lowercase )
last_numbers.append(_lowercase )
if answer(_lowercase ) == "low":
a : Optional[int] = number
elif answer(_lowercase ) == "high":
a : Tuple = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def _SCREAMING_SNAKE_CASE ( ) ->None:
'''simple docstring'''
a : Tuple = int(input("Enter lower value : " ).strip() )
a : Dict = int(input("Enter high value : " ).strip() )
a : Optional[int] = int(input("Enter value to guess : " ).strip() )
guess_the_number(_lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
main()
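# Worked trace (added for illustration): guess_the_number(0, 1000, 355) bisects
# the interval via get_avg, guessing 500, 250, 375, 312, 343, 359, 351, 355,
# i.e. O(log(higher - lower)) guesses before the answer "same" ends the loop.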
| style_context_codestyle: 31 | label: 0 |
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
a : Any = multiprocessing.Manager()
a : int = manager.list()
a : List[Any] = multiprocessing.Process(target=lowercase__ , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append("timed out" )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : Any , _lowercase : str ) ->Any:
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
a : int = shutil.rmtree
a : int = os.rmdir
a : Optional[int] = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
a : int = {}
with swallow_io():
with time_limit(lowercase__ ):
exec(lowercase__ , lowercase__ )
result.append("passed" )
except TimeoutException:
result.append("timed out" )
except BaseException as e:
result.append(F"""failed: {e}""" )
# Needed for cleaning up.
a : Tuple = rmtree
a : List[str] = rmdir
a : str = chdir
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->int:
'''simple docstring'''
def signal_handler(_lowercase : List[Any] , _lowercase : List[Any] ):
raise TimeoutException("Timed out!" )
signal.setitimer(signal.ITIMER_REAL , lowercase__ )
signal.signal(signal.SIGALRM , lowercase__ )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ) ->Any:
'''simple docstring'''
a : str = WriteOnlyStringIO()
with contextlib.redirect_stdout(lowercase__ ):
with contextlib.redirect_stderr(lowercase__ ):
with redirect_stdin(lowercase__ ):
yield
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( ) ->int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as dirname:
with chdir(lowercase__ ):
yield dirname
class __UpperCamelCase ( __snake_case ):
pass
class __UpperCamelCase ( io.StringIO ):
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
raise OSError
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
raise OSError
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
raise OSError
def __a ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
return False
class __UpperCamelCase ( contextlib._RedirectStream ): # type: ignore
lowerCamelCase : Any ="""stdin"""
@contextlib.contextmanager
def _SCREAMING_SNAKE_CASE ( _lowercase : Tuple ) ->Dict:
'''simple docstring'''
if root == ".":
yield
return
a : Optional[int] = os.getcwd()
os.chdir(lowercase__ )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(lowercase__ )
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any]=None ) ->Union[str, Any]:
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
a : List[str] = None
a : Any = None
import os
a : Tuple = "1"
a : Union[str, Any] = None
a : Optional[Any] = None
a : str = None
a : Dict = None
a : Optional[Any] = None
a : Optional[Any] = None
a : int = None
a : Optional[int] = None
a : List[str] = None
a : Any = None
a : List[Any] = None
a : str = None
a : List[str] = None
a : List[str] = None
a : Union[str, Any] = None
a : Optional[int] = None
a : Any = None
a : List[str] = None
a : Any = None
a : Any = None
a : Optional[int] = None
a : Tuple = None
a : Optional[Any] = None
a : Tuple = None
a : Tuple = None
a : Optional[Any] = None
a : str = None
import shutil
a : Optional[int] = None
a : int = None
a : int = None
import subprocess
a : int = None # type: ignore
a : List[str] = None
import sys
a : Optional[Any] = None
a : int = None
a : Optional[Any] = None
a : Any = None
a : Optional[Any] = None
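# Usage sketch (illustrative; the sample program string is made up):
if __name__ == "__main__":
    demo_program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
    print(check_correctness(demo_program, timeout=3.0, task_id="demo/0", completion_id=0))
    # expected: {'task_id': 'demo/0', 'passed': True, 'result': 'passed', 'completion_id': 0}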
| code_codestyle: 701 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a : Any = logging.get_logger(__name__)
a : Tuple = {
'''tensor(bool)''': np.bool_,
'''tensor(int8)''': np.inta,
'''tensor(uint8)''': np.uinta,
'''tensor(int16)''': np.intaa,
'''tensor(uint16)''': np.uintaa,
'''tensor(int32)''': np.intaa,
'''tensor(uint32)''': np.uintaa,
'''tensor(int64)''': np.intaa,
'''tensor(uint64)''': np.uintaa,
'''tensor(float16)''': np.floataa,
'''tensor(float)''': np.floataa,
'''tensor(double)''': np.floataa,
}
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> str:
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
a : Optional[int] = model
a : int = kwargs.get("model_save_dir" , lowerCAmelCase__ )
a : Tuple = kwargs.get("latest_model_name" , lowerCAmelCase__ )
def __call__( self , **lowerCAmelCase__ ) -> Dict:
a : List[str] = {k: np.array(lowerCAmelCase__ ) for k, v in kwargs.items()}
return self.model.run(lowerCAmelCase__ , lowerCAmelCase__ )
@staticmethod
def __a ( lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__=None ) -> Union[str, Any]:
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
a : List[str] = "CPUExecutionProvider"
return ort.InferenceSession(lowerCAmelCase__ , providers=[provider] , sess_options=lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , **lowerCAmelCase__ ) -> int:
a : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
a : Optional[int] = self.model_save_dir.joinpath(self.latest_model_name )
a : List[str] = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
a : str = self.model_save_dir.joinpath(lowerCAmelCase__ )
if src_path.exists():
a : Any = Path(lowerCAmelCase__ ).joinpath(lowerCAmelCase__ )
try:
shutil.copyfile(lowerCAmelCase__ , lowerCAmelCase__ )
except shutil.SameFileError:
pass
def __a ( self , lowerCAmelCase__ , **lowerCAmelCase__ , ) -> str:
if os.path.isfile(lowerCAmelCase__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
# saving model weights/files
self._save_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]:
a : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(lowerCAmelCase__ ):
a : Tuple = OnnxRuntimeModel.load_model(
os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
a : Tuple = Path(lowerCAmelCase__ )
# load model from hub
else:
# download model
a : Optional[Any] = hf_hub_download(
repo_id=lowerCAmelCase__ , filename=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , )
a : Optional[int] = Path(lowerCAmelCase__ ).parent
a : List[Any] = Path(lowerCAmelCase__ ).name
a : int = OnnxRuntimeModel.load_model(lowerCAmelCase__ , provider=lowerCAmelCase__ , sess_options=lowerCAmelCase__ )
return cls(model=lowerCAmelCase__ , **lowerCAmelCase__ )
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ = True , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> List[str]:
a : Any = None
if len(str(lowerCAmelCase__ ).split("@" ) ) == 2:
a, a : Tuple = model_id.split("@" )
return cls._from_pretrained(
model_id=lowerCAmelCase__ , revision=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , force_download=lowerCAmelCase__ , use_auth_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
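# Usage sketch (illustrative; the repo id and input name are placeholders):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))
#   model.save_pretrained("./local_onnx_copy")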
| style_context_codestyle: 31 | label: 0 |
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : Optional[Any] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[int] , _lowercase : str , _lowercase : List[str]=None , _lowercase : Tuple=None , _lowercase : Dict=None , _lowercase : Optional[int]=None , _lowercase : List[str]=None , _lowercase : Union[str, Any]=None , ) ->Union[str, Any]:
'''simple docstring'''
if attention_mask is None:
a : Optional[Any] = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
a : Union[str, Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
a : Dict = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
a : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
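# Illustrative example (not in the original file): with pad_token_id = 1,
#
#   np.where(np.array([[5, 7, 1, 1]]) != 1, 1, 0)  ->  array([[1, 1, 0, 0]])
#
# which is exactly the mask the helper above builds for padded positions.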
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class __UpperCamelCase ( a__ , unittest.TestCase , a__ ):
lowerCamelCase : Optional[Any] =True
lowerCamelCase : List[str] =(
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
lowerCamelCase : Tuple =(FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def __a ( self ) -> Tuple:
a : Any = FlaxBlenderbotModelTester(self )
def __a ( self ) -> List[str]:
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase__ , lowercase__ , lowercase__ )
def __a ( self ) -> Optional[int]:
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase__ , lowercase__ , lowercase__ )
def __a ( self ) -> Optional[Any]:
a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
a : Dict = self._prepare_for_class(lowercase__ , lowercase__ )
a : List[str] = model_class(lowercase__ )
@jax.jit
def encode_jitted(lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
return model.encode(input_ids=lowercase__ , attention_mask=lowercase__ )
with self.subTest("JIT Enabled" ):
a : Tuple = encode_jitted(**lowercase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
a : Tuple = encode_jitted(**lowercase__ ).to_tuple()
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
for jitted_output, output in zip(lowercase__ , lowercase__ ):
self.assertEqual(jitted_output.shape , output.shape )
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}
        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")
        generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'
        generated_txt = tokenizer.batch_decode(generated_utterances.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text
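# A minimal worked example of `shift_tokens_right` (hedged sketch; token ids are
# illustrative, with 1 = pad and 2 = decoder start, matching the test above):
#   ids     = [[71, 82, 18, 33, 2, 1, 1]]
#   shifted = [[ 2, 71, 82, 18, 33, 2, 1]]
# i.e. every row is shifted one position to the right, position 0 becomes the
# decoder start token, and one trailing pad token is consumed -- which is exactly
# what the `n_pad_before - 1` assertion checks.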
| 702
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt: str, class_data_dir: str, num_class_images: int) -> None:
    """Download `num_class_images` real regularization images matching `class_prompt` via clip-retrieval."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1)
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1, )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload is a decodable image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
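# Example invocation (hypothetical prompt and output directory):
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200
# This writes images under ./real_reg/dog/images/ and records the matching
# captions, source URLs, and local paths in caption.txt / urls.txt / images.txt.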
| 31
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", **kwargs, ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=kernel_size, strides=stride, padding="VALID", groups=groups, use_bias=False, name="convolution", )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACT2FN[activation] if activation is not None else tf.identity
    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act, name="embedder", )
    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]
    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]
    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0], stride=2 if config.downsample_in_first_stage else 1, depth=config.depths[0], name="stages.0", ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))
    def call(self, hidden_state, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
    @unpack_inputs
    def call(self, pixel_values, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format to have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    @property
    def input_signature(self):
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
REGNET_START_DOCSTRING = r'''
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention, config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def call(self, pixel_values: tf.Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def call(self, pixel_values: Optional[tf.Tensor] = None, labels: Optional[tf.Tensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
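# Hedged usage sketch of the classification model above, guarded so importing
# the module stays side-effect free. The checkpoint follows the
# `_IMAGE_CLASS_CHECKPOINT` docstring constant; the COCO image URL is
# illustrative and network access is assumed.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor

    image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="tf")
    logits = model(**inputs).logits
    predicted_label = int(tf.math.argmax(logits, axis=-1)[0])
    print(model.config.id2label[predicted_label])  # expected: "tabby, tabby cat"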
| 703
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")
    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
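# Example invocations (hypothetical TPU name/zone; `--debug` only prints the
# assembled gcloud command instead of running it):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "pip install -U accelerate" --debug
#   accelerate tpu-config --command_file startup.sh --install_accelerate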
| 31
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def prepare_image_inputs(self):
        """Prepares a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
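# Hedged usage sketch outside the test harness (the checkpoint name is
# illustrative; any repo that ships a compatible tokenizer and image processor
# pair works the same way):
#   from transformers import VisionTextDualEncoderProcessor
#   processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
#   batch = processor(text=["a photo of a cat"], images=[image], return_tensors="pt")
#   # batch contains input_ids / attention_mask from the tokenizer and
#   # pixel_values from the image processor, ready for a dual encoder model.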
| 704
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/nllb-200-distilled-600M": (
            "https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
# fmt: on
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs, ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens])
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
a : Optional[int] = src_lang
a : int = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def __a ( self ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __a ( self , lowerCAmelCase__ ) -> None:
a : int = self.convert_tokens_to_ids(lowerCAmelCase__ )
if self.legacy_behaviour:
a : Tuple = []
a : List[str] = [self.eos_token_id, self.cur_lang_code]
else:
a : int = [self.cur_lang_code]
a : int = [self.eos_token_id]
a : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
a : Any = self.convert_ids_to_tokens(self.suffix_tokens )
a : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __a ( self , lowerCAmelCase__ ) -> None:
a : str = self.convert_tokens_to_ids(lowerCAmelCase__ )
if self.legacy_behaviour:
a : Optional[Any] = []
a : int = [self.eos_token_id, self.cur_lang_code]
else:
a : List[Any] = [self.cur_lang_code]
a : List[Any] = [self.eos_token_id]
a : int = self.convert_ids_to_tokens(self.prefix_tokens )
a : int = self.convert_ids_to_tokens(self.suffix_tokens )
a : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
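# Hedged usage sketch, guarded so importing the module stays side-effect free:
# translate English to French with the checkpoint referenced in
# PRETRAINED_VOCAB_FILES_MAP above. The generation settings are illustrative
# and PyTorch is assumed to be installed.
if __name__ == "__main__":
    from transformers import AutoModelForSeq2SeqLM

    tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
    inputs = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
    # Forcing the first decoder token to the target language code selects the
    # output language, mirroring `_build_translation_inputs` above.
    generated = model.generate(
        **inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"), max_length=30)
    print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])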
| 31
| 0
|
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: return the longest palindromic substring of
    `input_string` in O(n) time by interleaving separators and reusing
    previously computed palindrome lengths.
    """
    max_length = 0
    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""
    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]
    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741
    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]
    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1
        length[j] = 2 * k - 1
        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1
        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j
    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i
    return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
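# Worked examples for the implementation above:
#   palindromic_string("abbbaba")  -> "abbba"
#   palindromic_string("ababa")    -> "ababa"
# The interleaved "|" separators make every palindrome odd-length, so a single
# center-expansion table handles both odd- and even-length palindromes, and
# reusing mirrored lengths inside the window [l, r] keeps the total work O(n).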
| 705
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
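# Hedged sketch of the two-step (predictor-corrector) sampling loop this
# scheduler is built for (Algorithm 2 in Karras et al., 2022). `unet` is a
# placeholder denoising model, not part of this file:
#   scheduler.set_timesteps(num_inference_steps)
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
#       model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
#       step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:  # second-order correction
#           model_output = (sigma_prev / 2) * unet((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
#           step_output = scheduler.step_correct(
#               model_output, sigma_hat, sigma_prev, sample_hat,
#               step_output.prev_sample, step_output.derivative)
#       sample = step_output.prev_sample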
| 31
| 0
|
"""simple docstring"""
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?", ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "], int, )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`")
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?", ["Provide IAM Role name", "Create new IAM role using credentials"], int, )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)
    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())
    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ", lambda x: str(x).lower(), )
    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ", lambda x: str(x).lower(), )
    distributed_type = _ask_options(
        "What is the distributed mode?", ["No distributed training", "Data parallelism"], _convert_sagemaker_distributed_mode, )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?", TORCH_DYNAMO_MODES, lambda x: TORCH_DYNAMO_MODES[int(x)], default="default", )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ", _convert_yes_no_to_bool, default=False, error_message="Please enter yes or no.", )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ", int, default=1, )
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?", ["no", "fp16", "bf16", "fp8"], _convert_mixed_precision, )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.")
    return SageMakerConfig(
        image_uri=docker_image, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=distributed_type, use_cpu=False, dynamo_config=dynamo_config, ec2_instance_type=ec2_instance_type, profile=aws_profile, region=aws_region, iam_role_name=iam_role_name, mixed_precision=mixed_precision, num_machines=num_machines, sagemaker_inputs_file=sagemaker_inputs_file, sagemaker_metrics_file=sagemaker_metrics_file, )
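# The returned SageMakerConfig is what `accelerate config` persists to disk; a
# typical result (illustrative values only) looks like:
#   SageMakerConfig(compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
#                   distributed_type=SageMakerDistributedType.DATA_PARALLEL,
#                   ec2_instance_type="ml.p3.16xlarge", num_machines=1,
#                   region="us-east-1",
#                   iam_role_name="accelerate_sagemaker_execution_role",
#                   mixed_precision="fp16")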
| 706
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]
        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
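
# A minimal usage sketch of the builder above (the file name and options are
# illustrative assumptions): users normally reach this module through
# `load_dataset("csv", ...)`, whose extra keyword arguments populate CsvConfig
# and are forwarded to `pandas.read_csv`.
#
#   import datasets
#
#   dataset = datasets.load_dataset(
#       "csv",
#       data_files={"train": "my_data.csv"},  # hypothetical local file
#       sep=";",                              # becomes CsvConfig.sep
#       skiprows=1,                           # forwarded to pandas.read_csv
#   )
#   print(dataset["train"].features)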
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = """<<<<<<< This should probably be modified because it mentions: """
HIGHLIGHT_MESSAGE_POST = """=======
>>>>>>>
"""
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(R'''tfds\.core''', R'''datasets'''),
(R'''tf\.io\.gfile\.GFile''', R'''open'''),
(R'''tf\.([\w\d]+)''', R'''datasets.Value(\'\1\')'''),
(R'''tfds\.features\.Text\(\)''', R'''datasets.Value(\'string\')'''),
(R'''tfds\.features\.Text\(''', R'''datasets.Value(\'string\'),'''),
(R'''features\s*=\s*tfds.features.FeaturesDict\(''', R'''features=datasets.Features('''),
(R'''tfds\.features\.FeaturesDict\(''', R'''dict('''),
(R'''The TensorFlow Datasets Authors''', R'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(R'''tfds\.''', R'''datasets.'''),
(R'''dl_manager\.manual_dir''', R'''self.config.data_dir'''),
(R'''self\.builder_config''', R'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory function used to instantiate the convert command from the provided command line arguments.

    Returns: ConvertCommand
    """
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")

        abs_datasets_path = os.path.abspath(self._datasets_directory)

        self._logger.info(f"Converting datasets from {abs_tfds_path} to {abs_datasets_path}")

        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}

        if os.path.isdir(self._tfds_path):
            file_names = os.listdir(abs_tfds_path)
        else:
            file_names = [os.path.basename(self._tfds_path)]

        for f_name in file_names:
            self._logger.info(f"Looking at file {f_name}")
            input_file = os.path.join(abs_tfds_path, f_name)
            output_file = os.path.join(abs_datasets_path, f_name)

            if not os.path.isfile(input_file) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
                self._logger.info("Skipping file")
                continue

            with open(input_file, encoding="utf-8") as f:
                lines = f.readlines()

            out_lines = []
            is_builder = False
            needs_manual_update = False
            tfds_imports = []
            for line in lines:
                out_line = line

                # Convert imports
                if "import tensorflow.compat.v2 as tf" in out_line:
                    continue
                elif "@tfds.core" in out_line:
                    continue
                elif "builder=self" in out_line:
                    continue
                elif "import tensorflow_datasets.public_api as tfds" in out_line:
                    out_line = "import datasets\n"
                elif "import tensorflow" in out_line:
                    # order is important here
                    out_line = ""
                    continue
                elif "from absl import logging" in out_line:
                    out_line = "from datasets import logging\n"
                elif "getLogger" in out_line:
                    out_line = out_line.replace("getLogger", "get_logger")
                elif any(expression in out_line for expression in TO_HIGHLIGHT):
                    needs_manual_update = True
                    to_remove = list(filter(lambda e: e in out_line, TO_HIGHLIGHT))
                    out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(to_remove) + "\n")
                    out_lines.append(out_line)
                    out_lines.append(HIGHLIGHT_MESSAGE_POST)
                    continue
                else:
                    for pattern, replacement in TO_CONVERT:
                        out_line = re.sub(pattern, replacement, out_line)

                # Take care of saving utilities (to later move them together with main script)
                if "tensorflow_datasets" in out_line:
                    match = re.match(r"from\stensorflow_datasets.*import\s([^\.\r\n]+)", out_line)
                    tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
                    out_line = "from . import " + match.group(1)

                # Check we have not forgotten anything
                if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
                    raise ValueError(f"Error converting {out_line.strip()}")

                if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
                    is_builder = True
                out_lines.append(out_line)

            if is_builder or "wmt" in f_name:
                # We create a new directory for each dataset
                dir_name = f_name.replace(".py", "")
                output_dir = os.path.join(abs_datasets_path, dir_name)
                output_file = os.path.join(output_dir, f_name)
                os.makedirs(output_dir, exist_ok=True)
                self._logger.info(f"Adding directory {output_dir}")
                imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
            else:
                # Utilities will be moved at the end
                utils_files.append(output_file)

            if needs_manual_update:
                with_manual_update.append(output_file)

            with open(output_file, "w", encoding="utf-8") as f:
                f.writelines(out_lines)
            self._logger.info(f"Converted in {output_file}")

        for utils_file in utils_files:
            try:
                f_name = os.path.basename(utils_file)
                dest_folder = imports_to_builder_map[f_name.replace(".py", "")]
                self._logger.info(f"Moving {utils_file} to {dest_folder}")
                shutil.copy(utils_file, dest_folder)
            except KeyError:
                self._logger.error(f"Cannot find destination folder for {utils_file}. Please copy manually.")

        if with_manual_update:
            for file_path in with_manual_update:
                self._logger.warning(
                    f"You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'."
                )
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if

        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
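
# A minimal two-stage inference sketch (the prompt is an illustrative
# assumption) mirroring what the slow test above exercises: stage I produces a
# 64x64 image which stage II upscales to 256x256.
#
#   import torch
#   from diffusers import IFPipeline, IFSuperResolutionPipeline
#
#   stage_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
#   stage_1.enable_model_cpu_offload()
#   stage_2 = IFSuperResolutionPipeline.from_pretrained(
#       "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
#   )
#   stage_2.enable_model_cpu_offload()
#
#   prompt_embeds, negative_embeds = stage_1.encode_prompt("anime turtle")
#   image = stage_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images
#   image = stage_2(image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds).images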
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    """Count the values of n below `limit` that have exactly ten solutions to
    x**2 - y**2 - z**2 = n with x, y, z consecutive terms of an arithmetic
    progression (Project Euler problem 135)."""
    limit = limit + 1
    frequency = [0] * limit
    for first_term in range(1, limit):
        for n in range(first_term, limit, first_term):
            common_difference = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
                continue
            else:
                common_difference /= 4
                if (
                    first_term > common_difference
                    and first_term < 4 * common_difference
                ):  # since x, y, z are positive integers
                    frequency[n] += 1  # so z > 0 requires a > d, and a < 4d
    count = sum(1 for x in frequency[1:limit] if x == 10)
    return count
if __name__ == "__main__":
print(F'''{solution() = }''')
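
# Why the sieve works: writing the progression as x = a + d, y = a, z = a - d
# gives x**2 - y**2 - z**2 = a * (4*d - a), so n is a multiple of a and
# d = (a + n/a) / 4 must be a positive integer with d < a < 4*d.
# A brute-force cross-check over the same parametrization (this helper is an
# assumption added for illustration, not part of the original solution):
def brute_force_solution(limit: int = 1_000) -> int:
    frequency = [0] * (limit + 1)
    for a in range(1, limit + 1):
        for d in range(a // 4 + 1, a):  # enforces a < 4*d and d < a
            n = a * (4 * d - a)
            if n > limit:  # n grows with d, so no larger d can qualify
                break
            frequency[n] += 1
    return sum(1 for x in frequency[1:] if x == 10)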
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))
    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))
    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # Removed: 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
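
# A minimal usage sketch of the helper under test: given a repository's file
# listing, it reports whether every PyTorch ".bin" weight has a safetensors
# counterpart (optionally for a variant such as "fp16"), e.g.
#
#   is_safetensors_compatible(
#       ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
#   )  # -> True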
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim,
        depths=depths,
        num_heads=num_heads,
        window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.norm2.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", F"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.stages.{i}.downsample.reduction.weight""", F"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.weight""", F"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.stages.{i}.downsample.norm.bias""", F"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[F'''upernet-swin-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
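
# A minimal invocation sketch (the script filename and output path are
# illustrative assumptions for how this conversion is typically run):
#
#   python convert_swin_upernet_to_pytorch.py \
#       --model_name upernet-swin-tiny \
#       --pytorch_dump_folder_path ./upernet-swin-tiny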
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
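
# A minimal shape-check sketch (the sizes are illustrative assumptions, not
# fixed by the module): Flax convolutions expect NHWC layout, and the block
# adds a broadcast time embedding between its two convolutions.
def _example_resnet_block_shapes():
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    hidden_states = jnp.zeros((1, 8, 8, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # per-sample time embedding
    params = block.init(jax.random.PRNGKey(0), hidden_states, temb, True)
    return block.apply(params, hidden_states, temb, True).shape  # (1, 8, 8, 64)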
"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class BeitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }
def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
        parser.set_defaults(func=test_command)
return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
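
# A minimal invocation sketch: the parser above backs the `accelerate test`
# subcommand, which launches a short end-to-end script using a saved config
# (the config path below is illustrative; omit it to use the default location).
#
#   accelerate test --config_file ~/.cache/huggingface/accelerate/default_config.yaml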
"""simple docstring"""
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """Return the root-mean-square speed, in m/s, of a molecule with the given
    molar mass (in kg/mol) at the given temperature (in K)."""
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # K
    molar_mass = 0.028  # kg/mol for nitrogen (N2); note the SI units, not g/mol
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
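
# Where the formula comes from: kinetic theory equates the translational
# kinetic energy per mole with temperature, (1/2) * M * v_rms**2 = (3/2) * R * T,
# which rearranges to v_rms = sqrt(3 * R * T / M). With T = 300 K and
# M = 0.028 kg/mol this gives roughly 517 m/s for nitrogen.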
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power (Project Euler problem 16)."""
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0

    for i in list_num:
        sum_of_num += int(i)

    return sum_of_num


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^ ", power, " = ", 2**power)
    result = solution(power)
    print("Sum of the digits is: ", result)
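
# A quick worked example: 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26, so
# solution(15) returns 26.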
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
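
# A minimal usage sketch (the model name and prompt are illustrative
# assumptions): stopping criteria are passed to `generate` as a
# StoppingCriteriaList and evaluated after every decoding step.
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   from transformers.generation import MaxLengthCriteria, StoppingCriteriaList
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   inputs = tokenizer("Hello", return_tensors="pt")
#   outputs = model.generate(
#       **inputs, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
#   )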
|
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __UpperCamelCase ( SchedulerCommonTest ):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0_001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas( self ):
        for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules( self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0_807 ) < 1E-2
        assert abs(result_mean.item() - 0.0_131 ) < 1E-3
    def test_full_loop_with_v_prediction( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 0.0_002 ) < 1E-2
        assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3
    def test_full_loop_device( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 10.0_807 ) < 1E-2
        assert abs(result_mean.item() - 0.0_131 ) < 1E-3
    def test_full_loop_device_karras_sigmas( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config , use_karras_sigmas=True )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device )
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample , generator=generator )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 124.52_299_499_511_719 ) < 1E-2
        assert abs(result_mean.item() - 0.16_213_932_633_399_963 ) < 1E-3
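    # The four full-loop tests above share one denoising pattern; condensed sketch
    # (illustrative; `model` and `initial_noise` stand in for a real denoiser and latents):
    #     scheduler = EulerDiscreteScheduler(num_train_timesteps=1100, beta_schedule="linear")
    #     scheduler.set_timesteps(10)
    #     sample = initial_noise * scheduler.init_noise_sigma
    #     for t in scheduler.timesteps:
    #         model_input = scheduler.scale_model_input(sample, t)
    #         noise_pred = model(model_input, t)
    #         sample = scheduler.step(noise_pred, t, sample).prev_sample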
| 713
|
"""simple docstring"""
def solution ( pence : int = 200 ) ->int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
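    # Two hand-checkable cases for the table-filling recurrence (illustrative):
    # 5 pence can be made 4 ways (5, 2+2+1, 2+1+1+1, 1+1+1+1+1).
    assert solution(5) == 4
    assert solution(10) == 11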
| 31
| 0
|
"""simple docstring"""
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card ( model_card_dir , src_lang , tgt_lang ) -> None:
    '''simple docstring'''
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = F"""{src_lang}-{tgt_lang}"""
    readme = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir , exist_ok=True )
    path = os.path.join(model_card_dir , "README.md" )
    print(F"""Generating {path}""" )
    with open(path , "w" , encoding="utf-8" ) as f:
        f.write(readme )
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / '''model_cards'''
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split('''-''')
    model_card_dir = model_cards_dir / '''facebook''' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 714
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=DummyObject ):
    _backends = ["transformers", "torch", "note_seq"]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
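    # These placeholders keep `import` statements working when the optional backends are
    # missing: instantiating the class (or calling its classmethods) routes through
    # requires_backends, which raises an ImportError naming the packages to install
    # ("transformers", "torch", "note_seq"). Illustrative behaviour only; with the
    # backends installed the real class is exported instead.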
| 31
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bigbird_pegasus'''] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
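# The _LazyModule indirection keeps importing this package cheap: the heavy modeling
# submodule is only imported on first attribute access. Sketch (assumes torch installed):
#     import transformers.models.bigbird_pegasus as bbp  # fast, nothing heavy yet
#     bbp.BigBirdPegasusConfig                            # triggers the real import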
| 715
|
"""simple docstring"""
import qiskit
def single_qubit_measure ( qubits : int , classical_bits : int ) ->qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Map the quantum measurement to the classical bits
    circuit.measure([0] , [0] )
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit , simulator , shots=1000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
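    # The returned Counts maps measured bitstrings to frequencies over the 1000 shots.
    # With no gates applied before measurement the qubit stays in |0>, so a typical
    # run (illustrative) prints: Total count for various states are: {'0': 1000}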
| 31
| 0
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config ( checkpoint_url ):
    '''simple docstring'''
    config = DPTConfig(embedding_type="hybrid" )
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = '''project'''
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = '''huggingface/label-files'''
        filename = '''ade20k-id2label.json'''
        id2label = json.load(open(cached_download(hf_hub_url(repo_id , filename , repo_type="dataset" ) ) , "r" ) )
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_ ( state_dict ) -> None:
    '''simple docstring'''
    ignore_keys = ['''pretrained.model.head.weight''', '''pretrained.model.head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key ( name ) -> str:
    '''simple docstring'''
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
a : Union[str, Any] = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
a : str = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
a : Union[str, Any] = name.replace("patch_embed" , "" )
if "pos_embed" in name:
a : Union[str, Any] = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
a : Union[str, Any] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
a : Optional[Any] = name.replace("proj" , "projection" )
if "blocks" in name:
a : Optional[int] = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
a : int = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a : Any = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
a : Tuple = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
a : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
a : List[str] = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
a : Any = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
a : List[Any] = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
a : Optional[Any] = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
a : List[str] = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
a : Dict = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
a : List[str] = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
a : Optional[Any] = name.replace(F"""refinenet{layer_idx}""" , F"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
a : Dict = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
a : str = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
a : Union[str, Any] = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
a : Optional[Any] = name.replace("conv1" , "convolution1" )
if "conv2" in name:
a : Tuple = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
a : str = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
a : Dict = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
a : Tuple = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
a : Optional[int] = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
a : Any = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
a : Optional[int] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
a : str = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
a : str = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
a : Tuple = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
a : str = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
a : Optional[int] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
a : List[str] = name.replace("pretrained" , "dpt" )
if "bn" in name:
a : List[str] = name.replace("bn" , "batch_norm" )
if "head" in name:
a : Optional[Any] = name.replace("head" , "head.head" )
if "encoder.norm" in name:
a : Optional[int] = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
a : List[Any] = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
a : List[str] = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
a : List[str] = name.replace(".." , "." )
if "stem.conv" in name:
a : Any = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
a : List[str] = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
a : int = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
a : Optional[int] = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
a : Union[str, Any] = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
a : int = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
a : str = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def read_in_q_k_v ( state_dict , config ) -> None:
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img ( ):
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint ( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ) -> None:
    '''simple docstring'''
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location="cpu" )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors="pt" )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F"""Saving model to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas" )
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
a : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
a : str = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
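# Hypothetical invocation (the script filename is illustrative; the flags are the ones
# defined by the argparser above):
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-large --show_prediction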
| 716
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort ( a , start , end ):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start , end )
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a , start , end )
        count += _in_place_quick_sort(a , start , p - 1 )
        count += _in_place_quick_sort(a , p + 1 , end )
    return count
def _in_place_partition ( a , start , end ):
    '''simple docstring'''
    count = 0
    pivot = randint(start , end )
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start , end ):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
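# For context: randomized quicksort performs O(n log n) comparisons in expectation, so
# with n = 100 the count printed above typically lands in the several-hundreds
# (n * log2(n) is roughly 664; the exact value is run-dependent).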
| 31
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_convbert_fast'''] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_convbert'''] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_convbert'''] = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 717
|
"""simple docstring"""
import base64
def base85_encode ( string : str ) ->bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode ( a85encoded : bytes ) ->str:
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
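    # Round-trip usage sketch (illustrative; the exact Ascii85 bytes depend on the
    # input, but decoding an encoding always recovers the original text):
    assert base85_decode(base85_encode("Hello World!")) == "Hello World!"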
| 31
| 0
|
"""simple docstring"""
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters ( model_a , model_b , did_step , iteration ) -> None:
    '''simple docstring'''
    for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is False
            ), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad , grad_param.grad ) is True
            ), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def step_model ( model , input , target , accelerator , do_backward=True ) -> None:
    '''simple docstring'''
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup ( accelerator , sched=False ):
    '''simple docstring'''
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync ( accelerator ) -> None:
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync ( accelerator ) -> None:
    '''simple docstring'''
    model, ddp_model, dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation ( split_batches=False , dispatch_batches=False ) -> None:
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler ( split_batches=False , dispatch_batches=False ) -> None:
    '''simple docstring'''
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target) )
        input, target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def test_dataloader_break ( ) -> None:
    '''simple docstring'''
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main ( ) -> None:
    '''simple docstring'''
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def _mp_fn ( index ) -> None:
    '''simple docstring'''
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
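# Outside of these tests the same wrapper drives real training loops; minimal sketch
# (all names illustrative, `compute_loss` is a placeholder):
#     accelerator = Accelerator(gradient_accumulation_steps=2)
#     model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#     for batch in dataloader:
#         with accelerator.accumulate(model):
#             loss = compute_loss(model, batch)
#             accelerator.backward(loss)
#             optimizer.step()
#             optimizer.zero_grad()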
| 718
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class __UpperCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
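    # The two sequence helpers above produce ALBERT's standard layouts:
    #   single sequence: [CLS] A [SEP]          -> token_type_ids all 0
    #   sequence pair:   [CLS] A [SEP] B [SEP]  -> 0s over "[CLS] A [SEP]", 1s over "B [SEP]"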
| 31
| 0
|
import math
class Graph:
    def __init__( self , n=0 ):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0 , n )] for i in range(0 , n )
        ]  # dp[i][j] stores minimum distance from i to j
    def add_edge( self , u , v , w ):
        self.dp[u][v] = w
    def floyd_warshall( self ):
        for k in range(0 , self.n ):
            for i in range(0 , self.n ):
                for j in range(0 , self.n ):
                    self.dp[i][j] = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j] )
    def show_min( self , u , v ):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
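    # With the edges above, the shortest 1 -> 4 route is 1 -> 3 -> 4 (5 + 6 = 11) and
    # the shortest 0 -> 3 route is 0 -> 2 -> 3 (9 + 7 = 16). Dumping the full distance
    # matrix is a one-liner per row (illustrative):
    #     for i in range(graph.n):
    #         print([graph.dp[i][j] for j in range(graph.n)])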
| 719
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list ( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
class SpeechTaFeatureExtractionTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6000 , do_normalize=True , num_mel_bins=80 , hop_length=16 , win_length=64 , win_function="hann_window" , fmin=80 , fmax=7600 , mel_floor=1E-10 , return_attention_mask=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target( self , equal_length=False , numpify=False ):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
class __UpperCamelCase ( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp( self ):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
    def test_call( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def test_zero_mean_unit_variance_normalization_np( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , padding=padding , max_length=max_length , return_tensors="np" )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self.assertTrue(input_values[0][800:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lengths = range(800 , 1400 , 200 )
        speech_inputs = [floats_list((1, x) )[0] for x in lengths]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="max_length" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest( self ):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="longest" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2000 , padding="longest" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )
    def test_double_precision_pad( self ):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def test_call_target( self ):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs , padding=True , return_tensors="np" ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_1 , encoded_sequences_2 , atol=1E-3 ) )
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_1 = feature_extractor(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1 , encoded_sequences_2 ):
            self.assertTrue(np.allclose(enc_seq_1 , enc_seq_2 , atol=1E-3 ) )
    def __a ( self ) -> str:
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def __a ( self ) -> Tuple:
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
    @require_torch
    def __a ( self ) -> Optional[Any]:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        input_np = feat_extract.pad(processed_features , padding="longest" , return_tensors="np" )[input_name]
        input_pt = feat_extract.pad(processed_features , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
    def __a ( self ) -> Union[str, Any]:
        feat_extract_dict = self.feat_extract_dict
        feat_extract_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed = feat_extract.pad(processed , padding="longest" , return_tensors="np" )
        self.assertIn("attention_mask" , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )
    def __a ( self ) -> Union[str, Any]:
        feat_extract_dict = self.feat_extract_dict
        feat_extract_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed_pad = feat_extract.pad(
            processed , padding="max_length" , max_length=max_length , truncation=True , return_tensors="np" )
        self.assertIn("attention_mask" , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ) -> Optional[int]:
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def __a ( self ) -> Union[str, Any]:
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
             3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
             2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
             4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
             7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
             4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 9_3680) )
        self.assertTrue(torch.allclose(input_values[0, :30] , EXPECTED_INPUT_VALUES , atol=1E-6 ) )
    def __a ( self ) -> Union[str, Any]:
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
             -3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
             -3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
             -3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
        # fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
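    # A minimal standalone sketch (an assumption, not part of the upstream test
    # suite) of the zero-mean / unit-variance check the assertions above rely on:
    @staticmethod
    def _sketch_check_zero_mean_unit_variance(values , atol=1E-3 ):
        values = np.asarray(values )
        assert abs(values.mean() ) < atol , "normalized audio should have ~0 mean"
        assert abs(values.var() - 1.0 ) < atol , "normalized audio should have ~1 variance"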
| 31
| 0
|
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> int:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class __UpperCamelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking : Tuple =True
    all_model_classes : Optional[Any] =(
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> None:
        self.model_tester = FlaxRobertaModelTester(self )
@slow
def __a ( self ) -> Dict:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
| 720
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example ) -> dict:
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
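# Illustrative note (not part of the script): "ratio_char_token" measures
# characters per token; a pure-Python stand-in for the computation in tokenize():
def _ratio_char_token(content: str , input_ids: list ) -> float:
    return len(content ) / len(input_ids )
assert _ratio_char_token("abcdef" , [1, 2, 3] ) == 2.0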
| 31
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class __UpperCamelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes : List[Any] =(
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ) -> None:
        self.model_tester = FlaxAlbertModelTester(self )
@slow
def __a ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2" )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class __UpperCamelCase ( unittest.TestCase ):
@slow
def __a ( self ) -> Union[str, Any]:
        model = FlaxAlbertModel.from_pretrained("albert-base-v2" )
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape , expected_shape )
        expected_slice = np.array(
            [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
| 721
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mta"] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mta"] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mta"] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
| 31
| 0
|
"""simple docstring"""
def set_bit(number: int , position: int ) -> int:
    '''simple docstring'''
    return number | (1 << position)
def clear_bit(number: int , position: int ) -> int:
    '''simple docstring'''
    return number & ~(1 << position)
def flip_bit(number: int , position: int ) -> int:
    '''simple docstring'''
    return number ^ (1 << position)
def is_bit_set(number: int , position: int ) -> bool:
    '''simple docstring'''
    return ((number >> position) & 1) == 1
def get_bit(number: int , position: int ) -> int:
    '''simple docstring'''
    return int((number & (1 << position)) != 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
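    # Quick sanity checks for the helpers above (illustrative; 0b1010 == 10):
    assert set_bit(0b1010, 0) == 0b1011
    assert clear_bit(0b1011, 1) == 0b1001
    assert flip_bit(0b1010, 1) == 0b1000
    assert is_bit_set(0b1010, 3) is True
    assert get_bit(0b1010, 0) == 0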
| 700
|
"""simple docstring"""
def temp_input_value(min_val: int = 10 , max_val: int = 1000 , option: bool = True ) -> int:
    '''simple docstring'''
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
    return min_val if option else max_val
def get_avg(number_1: int , number_2: int ) -> int:
    '''simple docstring'''
    return int((number_1 + number_2) / 2 )
def guess_the_number(lower: int , higher: int , to_guess: int ) -> None:
    '''simple docstring'''
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)" )
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value" )
    def answer(number: int ) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"
    print("started..." )
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
    print(F"""guess the number : {last_numbers[-1]}""" )
    print(F"""details : {last_numbers!s}""" )
def main() -> None:
    '''simple docstring'''
    lower = int(input("Enter lower value : " ).strip() )
    higher = int(input("Enter high value : " ).strip() )
    guess = int(input("Enter value to guess : " ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
    main()
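# Non-interactive sanity run (illustrative): the bisection above halves the
# search interval each step, so this converges in roughly log2(higher - lower)
# guesses and prints a trace ending in 42.
def demo() -> None:
    guess_the_number(0 , 100 , 42 )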
| 31
| 0
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)
DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
    # See all DPT models at https://huggingface.co/models?filter=dpt
}
class __UpperCamelCase ( PretrainedConfig ):
lowerCamelCase : Union[str, Any] ="""dpt"""
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , image_size=384 , patch_size=16 , num_channels=3 , is_hybrid=False , qkv_bias=True , backbone_out_indices=[2, 5, 8, 11] , readout_type="project" , reassemble_factors=[4, 2, 1, 0.5] , neck_hidden_sizes=[96, 192, 384, 768] , fusion_hidden_size=256 , head_in_index=-1 , use_batch_norm_in_fusion_residual=False , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , semantic_loss_ignore_index=255 , semantic_classifier_dropout=0.1 , backbone_featmap_shape=[1, 1024, 24, 24] , neck_ignore_stages=[0, 1] , backbone_config=None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid
        if self.is_hybrid:
            if backbone_config is None:
                logger.info("Initializing the config with a `BiT` backbone." )
                backbone_config = {
                    "global_padding": "same",
                    "layer_type": "bottleneck",
                    "depths": [3, 4, 9],
                    "out_features": ["stage1", "stage2", "stage3"],
                    "embedding_dynamic_padding": True,
                }
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , dict ):
                logger.info("Initializing the config with a `BiT` backbone." )
                backbone_config = BitConfig(**backbone_config )
            elif isinstance(backbone_config , PretrainedConfig ):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages
            if readout_type != "project":
                raise ValueError("Readout type must be 'project' when using `DPT-hybrid` mode." )
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError("Readout_type must be one of ['ignore', 'add', 'project']" )
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout
    def to_dict( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
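# Usage sketch (illustrative; `DPTConfig` is the upstream public name of the
# class above):
#
#   from transformers import DPTConfig
#   config = DPTConfig(is_hybrid=True)        # builds a default BiT backbone config
#   print(config.backbone_config.layer_type)  # "bottleneck"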
| 701
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class OnnxRuntimeModel :
    def __init__( self , model=None , **kwargs ) -> None:
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir" , None )
        self.latest_model_name = kwargs.get("latest_model_name" , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ) -> Dict:
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
    @staticmethod
    def load_model( path , provider=None , sess_options=None ) -> Union[str, Any]:
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory , file_name = None , **kwargs ) -> None:
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory , **kwargs , ) -> None:
        if os.path.isfile(save_directory ):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
@classmethod
    def _from_pretrained( cls , model_id , use_auth_token = None , revision = None , force_download = False , cache_dir = None , file_name = None , provider = None , sess_options = None , **kwargs , ) -> "OnnxRuntimeModel":
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["model_save_dir"] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["model_save_dir"] = Path(model_cache_path ).parent
            kwargs["latest_model_name"] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
@classmethod
    def from_pretrained( cls , model_id , force_download = True , use_auth_token = None , cache_dir = None , **model_kwargs , ) -> "OnnxRuntimeModel":
        revision = None
        if len(str(model_id ).split("@" ) ) == 2:
            model_id, revision = model_id.split("@" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
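# Usage sketch (illustrative; paths and input names are assumptions, not part
# of the file above):
#
#   import numpy as np
#   model = OnnxRuntimeModel.from_pretrained("path/to/model_dir", file_name="model.onnx")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))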
| 31
| 0
|
"""simple docstring"""
def solution(length: int = 50 ) -> int:
    '''simple docstring'''
    ways_number = [1] * (length + 1)
    for row_length in range(3 , length + 1 ):
        for block_length in range(3 , row_length + 1 ):
            for block_start in range(row_length - block_length ):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
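    # Sanity check from the Project Euler 114 statement: a row of length 7
    # admits exactly 17 arrangements.
    assert solution(7) == 17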
| 702
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt , class_data_dir , num_class_images ) -> None:
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )
    with open(F"""{class_data_dir}/caption.txt""" , "w" ) as f1, open(F"""{class_data_dir}/urls.txt""" , "w" ) as f2, open(
        F"""{class_data_dir}/images.txt""" , "w" ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(F"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
                        f.write(img.content )
                    f1.write(images["caption"] + "\n" )
                    f2.write(images["url"] + "\n" )
                    f3.write(F"""{class_data_dir}/images/{total}.jpg""" + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args() -> argparse.Namespace:
    '''simple docstring'''
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
a : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 31
| 0
|
"""simple docstring"""
def add(first: int , second: int ) -> int:
    '''simple docstring'''
    while second != 0:
        c = first & second
        first ^= second
        second = c << 1
    return first
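# The loop implements carry-propagation addition with bitwise ops: `first & second`
# is the carry and XOR adds without carrying. Quick checks for non-negative ints:
assert add(3, 5) == 8
assert add(0, 7) == 7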
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Optional[Any] = int(input('''Enter the first number: ''').strip())
a : Dict = int(input('''Enter the second number: ''').strip())
print(F'''{add(first, second) = }''')
| 703
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None ) -> Optional[Any]:
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config" , description=_description )
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments" , "Arguments that can be configured through `accelerate config`." )
    config_args.add_argument(
        "--config_file" , type=str , default=None , help="Path to the config file to use for accelerate." , )
    config_args.add_argument(
        "--tpu_name" , default=None , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
    config_args.add_argument(
        "--tpu_zone" , default=None , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
    pod_args = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
    pod_args.add_argument(
        "--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
    pod_args.add_argument(
        "--command_file" , default=None , help="The path to the file containing the commands to run on the pod on startup." , )
    pod_args.add_argument(
        "--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
    pod_args.add_argument(
        "--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
    pod_args.add_argument(
        "--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
    pod_args.add_argument(
        "--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher(args ) -> None:
    '''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod." )
    if args.command_file:
        with open(args.command_file , "r" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = "; ".join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F"""Running {' '.join(cmd )}""" )
        return
    subprocess.run(cmd )
    print("Successfully setup pod." )
def main() -> None:
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
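# Example invocation (illustrative; flag names come from the parser above,
# resource names are hypothetical):
#
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "python train.py" --install_accelerate --debug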
| 31
| 0
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 704
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
a : Tuple = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
a : List[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCamelCase ( PreTrainedTokenizerFast ):
    vocab_files_names : Optional[Any] =VOCAB_FILES_NAMES
    max_model_input_sizes : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map : Dict =PRETRAINED_VOCAB_FILES_MAP
    model_input_names : List[Any] =["""input_ids""", """attention_mask"""]
    slow_tokenizer_class : Union[str, Any] =NllbTokenizer
    prefix_tokens : List[int] =[]
    suffix_tokens : List[int] =[]
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , **lowerCAmelCase__ , ) -> Optional[Any]:
# Mask token behave like a normal word, i.e. include the space before it
a : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
a : Optional[Any] = legacy_behaviour
super().__init__(
vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , src_lang=lowerCAmelCase__ , tgt_lang=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , legacy_behaviour=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : int = vocab_file
a : Any = False if not self.vocab_file else True
a : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
a : str = {
lang_code: self.convert_tokens_to_ids(lowerCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
a : List[Any] = src_lang if src_lang is not None else "eng_Latn"
a : str = self.convert_tokens_to_ids(self._src_lang )
a : Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
    @property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> Any:
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
a : Optional[int] = src_lang
a : int = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def __a ( self ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(tgt_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
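# Usage sketch (illustrative; `NllbTokenizerFast` is the upstream public name of
# the class above). With legacy_behaviour=False the source language code is
# prepended to input_ids and </s> appended:
#
#   tok = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn")
#   batch = tok("Hello world", text_target="Bonjour le monde", return_tensors="pt")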
| 31
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''nielsr/canine-s''': 2048,
}
# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE_000
SEP = 0xE_001
BOS = 0xE_002
MASK = 0xE_003
RESERVED = 0xE_004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class __UpperCamelCase ( PreTrainedTokenizer ):
    max_model_input_sizes : Tuple =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__( self , bos_token=chr(CLS ) , eos_token=chr(SEP ) , sep_token=chr(SEP ) , cls_token=chr(CLS ) , pad_token=chr(PAD ) , mask_token=chr(MASK ) , add_prefix_space=False , model_max_length=2048 , **kwargs , ) -> None:
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , model_max_length=model_max_length , **kwargs , )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint
        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }
        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints )
    @property
    def vocab_size( self ) -> int:
        return self._unicode_vocab_size
    def _tokenize( self , text ) -> List[str]:
        return list(text )
    def _convert_token_to_id( self , token ) -> int:
        try:
            return ord(token )
        except TypeError:
            raise ValueError(f"""invalid token: '{token}'""" )
    def _convert_id_to_token( self , index ) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index )
        except TypeError:
            raise ValueError(f"""invalid id: {index}""" )
    def convert_tokens_to_string( self , tokens ) -> str:
        return "".join(tokens )
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result
    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        result = [1] + ([0] * len(token_ids_0 )) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1 )) + [1]
        return result
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        result = len(cls + token_ids_0 + sep ) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep ) * [1]
        return result
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        return ()
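# CANINE's "vocabulary" is Unicode itself: a token is a single character and its
# id is that character's codepoint, so no vocabulary file is needed. Standalone
# illustration of the mapping (plain Python, independent of the class above):
assert [ord(ch) for ch in "hi"] == [104, 105]
assert chr(CLS) == "\ue000"  # special tokens live in the Private Use Area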
| 705
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput ( BaseOutput ):
    prev_sample : torch.FloatTensor
    derivative : torch.FloatTensor
    pred_original_sample : Optional[torch.FloatTensor] =None
class __UpperCamelCase ( SchedulerMixin , ConfigMixin ):
    order : Tuple =2
@register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 100 , s_noise = 1.007 , s_churn = 80 , s_min = 0.05 , s_max = 50 , ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample
    def set_timesteps( self , num_inference_steps , device = None ) -> None:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
    def add_noise_to_input( self , sample , sigma , generator = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def step_correct( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def add_noise( self , original_samples , noise , timesteps ):
        raise NotImplementedError()
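# Sampling sketch mirroring the diffusers KarrasVe pipeline loop (illustrative;
# `unet` is a hypothetical denoising model and `shape` a hypothetical size):
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = torch.randn(shape) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = (sigma_hat / 2) * unet((sample_hat + 1) / 2, sigma_hat / 2).sample
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample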
| 31
| 0
|
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : int ) ->Dict:
'''simple docstring'''
for i in range(lowerCAmelCase_ , lowerCAmelCase_ ):
if array[i] == target:
return i
return -1
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : Optional[Any] ) ->List[Any]:
'''simple docstring'''
a : str = 0
a : List[Any] = len(lowerCAmelCase_ )
while left <= right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
a : Union[str, Any] = (left + right) // 3 + 1
a : Optional[int] = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
a : int = one_third - 1
elif array[two_third] < target:
a : Any = two_third + 1
else:
a : int = one_third + 1
a : Dict = two_third - 1
else:
return -1
def _SCREAMING_SNAKE_CASE ( _lowercase : List[Any] , _lowercase : Optional[int] , _lowercase : Optional[Any] , _lowercase : Dict ) ->List[str]:
'''simple docstring'''
if left < right:
if right - left < precision:
return lin_search(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
a : Union[str, Any] = (left + right) // 3 + 1
a : Any = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowerCAmelCase_ , one_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCAmelCase_ , lowerCAmelCase_ )
else:
return -1
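# Worked example (added; assumes the list is sorted ascending). Both variants
# shrink the interval to one third per step, then fall back to lin_search once
# fewer than `precision` elements remain:
#   ite_ternary_search([2, 4, 7, 11, 13, 18, 25, 31, 44, 52, 60], 18)         # -> 5
#   rec_ternary_search(0, 10, [2, 4, 7, 11, 13, 18, 25, 31, 44, 52, 60], 18)  # -> 5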
if __name__ == "__main__":
import doctest
doctest.testmod()
a : Dict = input('''Enter numbers separated by comma:\n''').strip()
a : Optional[int] = [int(item.strip()) for item in user_input.split(''',''')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
a : List[Any] = int(input('''Enter the number to be found in the list:\n''').strip())
a : Union[str, Any] = ite_ternary_search(collection, target)
a : Dict = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print('''Not found''')
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
a : Optional[Any] = datasets.utils.logging.get_logger(__name__)
a : Union[str, Any] = ['''names''', '''prefix''']
a : Any = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
a : Any = ['''encoding_errors''', '''on_bad_lines''']
a : List[str] = ['''date_format''']
@dataclass
class __UpperCamelCase ( datasets.BuilderConfig ):
lowerCamelCase : str =","
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[Union[int, List[int], str]] ="infer"
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[List[str]] =None
lowerCamelCase : Optional[Union[int, str, List[int], List[str]]] =None
lowerCamelCase : Optional[Union[List[int], List[str]]] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : Optional[Literal["c", "python", "pyarrow"]] =None
lowerCamelCase : Dict[Union[int, str], Callable[[Any], Any]] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : Optional[list] =None
lowerCamelCase : bool =False
lowerCamelCase : Optional[Union[int, List[int]]] =None
lowerCamelCase : Optional[int] =None
lowerCamelCase : Optional[Union[str, List[str]]] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : bool =True
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ="."
lowerCamelCase : Optional[str] =None
lowerCamelCase : str ='"'
lowerCamelCase : int =0
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : Optional[str] =None
lowerCamelCase : bool =True
lowerCamelCase : bool =True
lowerCamelCase : int =0
lowerCamelCase : bool =True
lowerCamelCase : bool =False
lowerCamelCase : Optional[str] =None
lowerCamelCase : int =1_0000
lowerCamelCase : Optional[datasets.Features] =None
lowerCamelCase : Optional[str] ="strict"
lowerCamelCase : Literal["error", "warn", "skip"] ="error"
lowerCamelCase : Optional[str] =None
def __a ( self ) -> Dict:
if self.delimiter is not None:
a : int = self.delimiter
if self.column_names is not None:
a : Any = self.column_names
@property
def __a ( self ) -> List[str]:
a : Dict = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value;
        # some others are deprecated, so we also skip them when they are left at their default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class __UpperCamelCase ( datasets.ArrowBasedBuilder ):
lowerCamelCase : Union[str, Any] =CsvConfig
def __a ( self ) -> Optional[Any]:
return datasets.DatasetInfo(features=self.config.features )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
if not self.config.data_files:
raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
a : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCAmelCase__ , (str, list, tuple) ):
a : Tuple = data_files
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Tuple = [files]
a : int = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
a : int = []
for split_name, files in data_files.items():
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Any = [files]
a : List[str] = [dl_manager.iter_files(lowerCAmelCase__ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCAmelCase__ , gen_kwargs={"files": files} ) )
return splits
def __a ( self , lowerCAmelCase__ ) -> pa.Table:
if self.config.features is not None:
a : Optional[Any] = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCAmelCase__ ) for feature in self.config.features.values() ):
# cheaper cast
a : Dict = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCAmelCase__ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
a : Union[str, Any] = table_cast(lowerCAmelCase__ , lowerCAmelCase__ )
return pa_table
def __a ( self , lowerCAmelCase__ ) -> Any:
a : Tuple = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
a : Any = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCAmelCase__ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCAmelCase__ ) ):
a : Tuple = pd.read_csv(lowerCAmelCase__ , iterator=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCAmelCase__ ):
a : Any = pa.Table.from_pandas(lowerCAmelCase__ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCAmelCase__ )
except ValueError as e:
logger.error(f"""Failed to read file '{file}' with error {type(lowerCAmelCase__ )}: {e}""" )
raise
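# --- Usage sketch (added; hedged, not part of the original module). In practice this
# builder is reached through the packaged "csv" loader; every CsvConfig field above
# can be forwarded as a keyword argument. The file path below is hypothetical.
#
#     from datasets import load_dataset
#     ds = load_dataset("csv", data_files={"train": "data/train.csv"}, sep=",")
#     print(ds["train"].features)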
def _SCREAMING_SNAKE_CASE ( _lowercase : Optional[Any] = 100_0000 ) ->Any:
'''simple docstring'''
a : List[Any] = limit + 1
a : Dict = [0] * limit
for first_term in range(1 , _snake_case ):
for n in range(_snake_case , _snake_case , _snake_case ):
a : Union[str, Any] = first_term + n / first_term
            if common_difference % 4:  # d must be divisible by 4
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                    frequency[n] += 1  # so z > 0 and a > d; also a < 4*d
a : Dict = sum(1 for x in frequency[1:limit] if x == 10 )
return count
if __name__ == "__main__":
print(F'''{solution() = }''')
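# Worked derivation (added; assumption: this targets Project Euler problem 135,
# where n = x**2 - y**2 - z**2 for an arithmetic progression x > y > z):
# writing x = y + d and z = y - d gives
#     n = (y + d)**2 - y**2 - (y - d)**2 = y * (4*d - y)
# so y ("first_term") must divide n, and d = (y + n / y) / 4 must be an
# integer -- hence the divisibility-by-4 test above. Positivity of z and n
# forces d < y < 4*d, which is the final bounds check.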
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : Dict =IFPipeline
lowerCamelCase : int =TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
lowerCamelCase : int =TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase : int =PipelineTesterMixin.required_optional_params - {"""latents"""}
def __a ( self ) -> List[str]:
return self._get_dummy_components()
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=0 ) -> Dict:
if str(lowerCAmelCase__ ).startswith("mps" ):
a : Tuple = torch.manual_seed(lowerCAmelCase__ )
else:
a : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ )
a : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __a ( self ) -> Union[str, Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def __a ( self ) -> Any:
        # Due to non-determinism in the save/load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __a ( self ) -> Union[str, Any]:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __a ( self ) -> Optional[int]:
self._test_save_load_local()
def __a ( self ) -> Tuple:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def __a ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self ) -> Tuple:
        # text-to-image (base IF stage)
a : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
a : str = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ )
        # pre-compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
a, a : List[str] = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
a : Optional[int] = None
a : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
a : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
a : List[Any] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
a : Union[str, Any] = IFInpaintingPipeline(**pipe_a.components )
a : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
# pipeline 1
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
a : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : List[str] = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Union[str, Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> int:
# pipeline 1
_start_torch_memory_measurement()
a : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
a : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : int = output.images[0]
assert image.shape == (256, 256, 3)
a : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
# pipeline 1
_start_torch_memory_measurement()
a : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
a : List[str] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , num_inference_steps=2 , generator=lowerCAmelCase__ , output_type="np" , )
a : List[Any] = output.images[0]
assert image.shape == (64, 64, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
a : Union[str, Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
# pipeline 2
_start_torch_memory_measurement()
a : str = torch.Generator(device="cpu" ).manual_seed(0 )
a : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : int = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
a : Dict = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(lowerCAmelCase__ )
a : Optional[int] = pipe_a(
prompt_embeds=lowerCAmelCase__ , negative_prompt_embeds=lowerCAmelCase__ , image=lowerCAmelCase__ , mask_image=lowerCAmelCase__ , original_image=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="np" , )
a : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
a : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
a : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
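# --- Usage sketch (added; a hedged, compact version of the cascade these tests
# exercise: stage I text-to-image followed by stage II super-resolution). Needs a
# CUDA GPU and an accepted DeepFloyd license on the Hub; not a drop-in test.
#
#     import torch
#     from diffusers import IFPipeline, IFSuperResolutionPipeline
#
#     stage_1 = IFPipeline.from_pretrained(
#         "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
#     )
#     stage_1.enable_model_cpu_offload()
#     prompt_embeds, negative_embeds = stage_1.encode_prompt("anime turtle")
#     image = stage_1(
#         prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt"
#     ).images
#
#     stage_2 = IFSuperResolutionPipeline.from_pretrained(
#         "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None
#     )
#     stage_2.enable_model_cpu_offload()
#     image = stage_2(
#         image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds
#     ).images[0]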
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily still works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
a : Optional[int] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : str = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Tuple:
a : Tuple = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : Dict = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Dict = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : List[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Any = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> int:
# pass variant but use the non-variant filenames
a : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
a : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Any = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : Union[str, Any] = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
# pass variant but use the non-variant filenames
a : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
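# Quick illustration (added; hedged): the helper returns True only when every
# torch .bin weight in the listing has a matching .safetensors counterpart.
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible

print(is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]))  # False
print(is_safetensors_compatible([
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]))  # True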
"""simple docstring"""
from collections import deque
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
a : Any = len(A_ )
a : str = deque()
a : List[str] = [False for _ in range(A_ )]
a : List[str] = [-1 for _ in range(A_ )]
a : str = index_of[:]
def strong_connect(_lowercase : Optional[int] , _lowercase : List[str] , _lowercase : Optional[int] ):
a : List[Any] = index # the number when this node is seen
a : Tuple = index # lowest rank node reachable from here
index += 1
stack.append(A_ )
a : Any = True
for w in g[v]:
if index_of[w] == -1:
a : str = strong_connect(A_ , A_ , A_ )
a : Any = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
a : int = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
a : Optional[int] = []
a : Tuple = stack.pop()
a : List[str] = False
component.append(A_ )
while w != v:
a : str = stack.pop()
a : List[Any] = False
component.append(A_ )
components.append(A_ )
return index
a : int = []
for v in range(A_ ):
if index_of[v] == -1:
strong_connect(A_ , 0 , A_ )
return components
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : str ) ->Tuple:
'''simple docstring'''
a : str = [[] for _ in range(A_ )]
for u, v in edges:
g[u].append(A_ )
return g
if __name__ == "__main__":
# Test
a : Tuple = 7
a : Union[str, Any] = [0, 0, 1, 2, 3, 3, 4, 4, 6]
a : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5]
a : Optional[int] = [(u, v) for u, v in zip(source, target)]
a : str = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
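# Another quick check (added; hedged): a single directed 3-cycle collapses into
# one strongly connected component.
#   tarjan(create_graph(3, [(0, 1), (1, 2), (2, 0)]))  # -> [[2, 1, 0]]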
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Tuple:
a : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Optional[Any]:
a, a, a, a : List[str] = hidden_states.shape
a : List[Any] = jax.image.resize(
lowerCAmelCase__ , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
a : List[str] = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> Dict:
a : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ ) -> Tuple:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
a : Tuple = self.conv(lowerCAmelCase__ )
return hidden_states
class __UpperCamelCase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : int =None
lowerCamelCase : float =0.0
lowerCamelCase : bool =None
lowerCamelCase : jnp.dtype =jnp.floataa
def __a ( self ) -> int:
a : Dict = self.in_channels if self.out_channels is None else self.out_channels
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : List[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : List[Any] = nn.Dense(lowerCAmelCase__ , dtype=self.dtype )
a : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
a : Optional[int] = nn.Dropout(self.dropout_prob )
a : Dict = nn.Conv(
lowerCAmelCase__ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
a : Union[str, Any] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
a : List[str] = None
if use_nin_shortcut:
a : Optional[Any] = nn.Conv(
lowerCAmelCase__ , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )
def __call__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=True ) -> str:
a : int = hidden_states
a : Tuple = self.norma(lowerCAmelCase__ )
a : Any = nn.swish(lowerCAmelCase__ )
a : int = self.conva(lowerCAmelCase__ )
a : int = self.time_emb_proj(nn.swish(lowerCAmelCase__ ) )
a : Tuple = jnp.expand_dims(jnp.expand_dims(lowerCAmelCase__ , 1 ) , 1 )
a : Dict = hidden_states + temb
a : str = self.norma(lowerCAmelCase__ )
a : List[Any] = nn.swish(lowerCAmelCase__ )
a : List[str] = self.dropout(lowerCAmelCase__ , lowerCAmelCase__ )
a : List[str] = self.conva(lowerCAmelCase__ )
if self.conv_shortcut is not None:
a : Tuple = self.conv_shortcut(lowerCAmelCase__ )
return hidden_states + residual
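# --- Usage sketch (added; a minimal, self-contained stand-in, not part of the
# original module). It mirrors the first (upsample) block above with explicit
# names so the flax init/apply pattern is visible; `TinyUpsample` is hypothetical.
import jax
import jax.numpy as jnp
import flax.linen as nn

class TinyUpsample(nn.Module):
    out_channels: int

    @nn.compact
    def __call__(self, x):
        batch, height, width, channels = x.shape  # NHWC, as in the blocks above
        x = jax.image.resize(x, shape=(batch, height * 2, width * 2, channels), method="nearest")
        return nn.Conv(self.out_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)))(x)

rng = jax.random.PRNGKey(0)
x = jnp.ones((1, 8, 8, 4))
params = TinyUpsample(out_channels=4).init(rng, x)
print(TinyUpsample(out_channels=4).apply(params, x).shape)  # (1, 16, 16, 4)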
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
a : str = HfApi()
a : List[str] = {}
# fmt: off
a : Optional[Any] = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
a : Dict = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
a : Union[str, Any] = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
a : str = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
a : Optional[Any] = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
a : List[Any] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
a : Optional[int] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
a : Tuple = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
a : Any = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
a : Union[str, Any] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
a : Tuple = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
a : Dict = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
a : Tuple = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
a : List[str] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
a : Union[str, Any] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
a : Any = api.list_models(filter='''diffusers''')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
a : Union[str, Any] = '''/home/patrick/google_checkpoints/''' + mod.modelId.split('''/''')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('''CompVis'''):
a : List[str] = UNetaDModel.from_pretrained(local_checkpoint, subfolder='''unet''')
else:
a : List[str] = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
a : int = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
a : Optional[int] = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
a : Any = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['''_'''.join('''_'''.join(mod.modelId.split('''/''')).split('''-'''))], atol=1E-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _SCREAMING_SNAKE_CASE ( _lowercase : str=None ) ->Optional[Any]:
'''simple docstring'''
if subparsers is not None:
a : Dict = subparsers.add_parser("test" )
else:
a : Tuple = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=_lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=_lowercase )
return parser
def _SCREAMING_SNAKE_CASE ( _lowercase : str ) ->str:
'''simple docstring'''
a : List[Any] = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
a : int = script_name
else:
a : int = F"""--config_file={args.config_file} {script_name}"""
a : Optional[int] = ["accelerate-launch"] + test_args.split()
a : Optional[int] = execute_subprocess_async(_lowercase , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def _SCREAMING_SNAKE_CASE ( ) ->Tuple:
'''simple docstring'''
a : Any = test_command_parser()
a : Union[str, Any] = parser.parse_args()
test_command(_lowercase )
if __name__ == "__main__":
main()
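# Typical invocations (added; hedged -- run from a shell after `accelerate config`):
#   accelerate test
#   accelerate test --config_file /path/to/default_config.yaml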
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
a : Union[str, Any] = logging.get_logger(__name__)
def _SCREAMING_SNAKE_CASE ( _lowercase : Union[str, Any] , _lowercase : str=False ) ->Tuple:
'''simple docstring'''
a : Dict = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
a : int = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : Optional[int] , _lowercase : Dict=False ) ->str:
'''simple docstring'''
for i in range(config.num_hidden_layers ):
if base_model:
a : List[Any] = ""
else:
a : List[str] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
a : Tuple = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
a : Optional[Any] = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
a : int = in_proj_weight[
: config.hidden_size, :
]
a : str = in_proj_bias[: config.hidden_size]
a : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
a : Dict = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
a : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
a : int = in_proj_bias[-config.hidden_size :]
def _SCREAMING_SNAKE_CASE ( _lowercase : Any ) ->Dict:
'''simple docstring'''
a : Optional[int] = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(__snake_case , __snake_case )
def _SCREAMING_SNAKE_CASE ( _lowercase : Any , _lowercase : str , _lowercase : Tuple ) ->Any:
'''simple docstring'''
a : Optional[Any] = dct.pop(__snake_case )
a : List[str] = val
def _SCREAMING_SNAKE_CASE ( ) ->Dict:
'''simple docstring'''
a : int = "http://images.cocodataset.org/val2017/000000039769.jpg"
a : Dict = Image.open(requests.get(__snake_case , stream=__snake_case ).raw )
return im
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Any ) ->Union[str, Any]:
'''simple docstring'''
a : List[str] = ViTConfig()
a : Any = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
a : Union[str, Any] = True
a : int = int(vit_name[-12:-10] )
a : str = int(vit_name[-9:-6] )
else:
a : Any = 1000
a : str = "huggingface/label-files"
a : Optional[Any] = "imagenet-1k-id2label.json"
a : str = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type="dataset" ) , "r" ) )
a : str = {int(__snake_case ): v for k, v in idalabel.items()}
a : Tuple = idalabel
a : Any = {v: k for k, v in idalabel.items()}
a : List[str] = int(vit_name[-6:-4] )
a : Tuple = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("tiny" ):
a : Union[str, Any] = 192
a : Any = 768
a : int = 12
a : List[str] = 3
elif vit_name[9:].startswith("small" ):
a : List[Any] = 384
a : Dict = 1536
a : Optional[Any] = 12
a : Optional[int] = 6
else:
pass
else:
if vit_name[4:].startswith("small" ):
a : Any = 768
a : int = 2304
a : Optional[int] = 8
a : Any = 8
elif vit_name[4:].startswith("base" ):
pass
elif vit_name[4:].startswith("large" ):
a : List[Any] = 1024
a : Any = 4096
a : Dict = 24
a : Any = 16
elif vit_name[4:].startswith("huge" ):
a : int = 1280
a : Optional[Any] = 5120
a : Any = 32
a : Dict = 16
# load original model from timm
a : List[Any] = timm.create_model(__snake_case , pretrained=__snake_case )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
a : Union[str, Any] = timm_model.state_dict()
if base_model:
remove_classification_head_(__snake_case )
a : str = create_rename_keys(__snake_case , __snake_case )
for src, dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
read_in_q_k_v(__snake_case , __snake_case , __snake_case )
# load HuggingFace model
if vit_name[-5:] == "in21k":
a : Optional[Any] = ViTModel(__snake_case ).eval()
else:
a : str = ViTForImageClassification(__snake_case ).eval()
model.load_state_dict(__snake_case )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
a : Optional[Any] = DeiTImageProcessor(size=config.image_size )
else:
a : str = ViTImageProcessor(size=config.image_size )
a : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="pt" )
a : str = encoding["pixel_values"]
a : Union[str, Any] = model(__snake_case )
if base_model:
a : Union[str, Any] = timm_model.forward_features(__snake_case )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(__snake_case , outputs.pooler_output , atol=1E-3 )
else:
a : Dict = timm_model(__snake_case )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__snake_case , outputs.logits , atol=1E-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(F"""Saving model {vit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__snake_case )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_patch16_224''',
type=str,
help='''Name of the ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
a : List[str] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
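# Example invocation (added; hedged -- assumes this file is saved as
# convert_vit_checkpoint.py; the timm weights are downloaded on the fly):
#   python convert_vit_checkpoint.py --vit_name vit_base_patch16_224 \
#       --pytorch_dump_folder_path ./vit_base_patch16_224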
"""simple docstring"""
a : str = 8.314_4598
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float ) ->float:
'''simple docstring'''
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
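# (Added note) The function evaluates the root-mean-square speed from kinetic theory,
#   v_rms = sqrt(3 * R * T / M),
# with R the universal gas constant in J/(K*mol), T in kelvin and M in kg/mol.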
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
a : Any = 300
a : Dict = 28
a : Dict = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
"""simple docstring"""
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ ) -> Optional[Any]:
a : Optional[int] = arr.split("," )
def __a ( self ) -> Tuple:
a : Union[str, Any] = [int(self.array[0] )] * len(self.array )
a : str = [int(self.array[0] )] * len(self.array )
for i in range(1 , len(self.array ) ):
a : Optional[Any] = max(
int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
a : Tuple = max(sum_value[i] , rear[i - 1] )
return rear[len(self.array ) - 1]
if __name__ == "__main__":
a : List[str] = input('''please input some numbers:''')
a : int = SubArray(whole_array)
a : str = array.solve_sub_array()
    print(('''the result is:''', re))
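# Worked example (added; hedged reading: this is Kadane's maximum-subarray scheme --
# sum_value[i] is the best sum of a subarray ending at i, rear[i] the best seen so far):
#   SubArray("1,-2,3,4,-1").solve_sub_array()  # -> 7, from the subarray [3, 4]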
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : str = 3
a : str = 250
a : List[Any] = ids_tensor((batch_size, length) , lowerCAmelCase__ )
a : Optional[Any] = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length
return input_ids, scores
def __a ( self ) -> List[Any]:
a, a : str = self._get_tensors(5 )
a : Any = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : str = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = MaxLengthCriteria(max_length=10 )
a, a : int = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : Union[str, Any] = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : Tuple = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
a, a : str = self._get_tensors(5 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(9 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a, a : int = self._get_tensors(10 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : List[Any] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __a ( self ) -> str:
a, a : Tuple = self._get_tensors(5 )
a : str = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
a : Optional[int] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) )
def __a ( self ) -> str:
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(lowerCAmelCase__ ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
a : Optional[int] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(lowerCAmelCase__ ) , 1 )
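# --- Usage sketch (added; hedged): in real decoding the criteria plug straight
# into `generate`, which stops as soon as any criterion in the list fires.
# The checkpoint below is a tiny public test model.
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxLengthCriteria, StoppingCriteriaList

tokenizer = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
model = AutoModelForCausalLM.from_pretrained("sshleifer/tiny-gpt2")
inputs = tokenizer("Hello", return_tensors="pt")
output_ids = model.generate(
    **inputs,
    stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=16)]),
)
print(output_ids.shape)  # sequence length capped at 16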
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __UpperCamelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=13 , lowerCAmelCase__=32 , lowerCAmelCase__=2 , lowerCAmelCase__=3 , lowerCAmelCase__=16 , lowerCAmelCase__=[1, 2, 1] , lowerCAmelCase__=[2, 2, 4] , lowerCAmelCase__=2 , lowerCAmelCase__=2.0 , lowerCAmelCase__=True , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__="gelu" , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=0.02 , lowerCAmelCase__=1E-5 , lowerCAmelCase__=True , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=10 , lowerCAmelCase__=8 , lowerCAmelCase__=["stage1", "stage2", "stage3"] , lowerCAmelCase__=[1, 2, 3] , ) -> Union[str, Any]:
a : Dict = parent
a : Tuple = batch_size
a : Optional[int] = image_size
a : List[str] = patch_size
a : List[str] = num_channels
a : Union[str, Any] = embed_dim
a : Optional[int] = depths
a : Union[str, Any] = num_heads
a : Union[str, Any] = window_size
a : Dict = mlp_ratio
a : Optional[Any] = qkv_bias
a : Union[str, Any] = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : str = drop_path_rate
a : Optional[Any] = hidden_act
a : Dict = use_absolute_embeddings
a : List[str] = patch_norm
a : int = layer_norm_eps
a : int = initializer_range
a : Union[str, Any] = is_training
a : Dict = scope
a : Union[str, Any] = use_labels
a : str = type_sequence_label_size
a : Dict = encoder_stride
a : Any = out_features
a : int = out_indices
def __a ( self ) -> int:
a : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a : Optional[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __a ( self ) -> Optional[int]:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : str = MaskFormerSwinModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
a : Tuple = model(UpperCamelCase_ )
a : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
a : int = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
a : Optional[Any] = MaskFormerSwinBackbone(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
a : Dict = model(UpperCamelCase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(UpperCamelCase_ ):
a : Tuple = ["stem"]
a : List[str] = MaskFormerSwinBackbone(config=UpperCamelCase_ )
def __a ( self ) -> Optional[Any]:
a : str = self.prepare_config_and_inputs()
a, a, a : int = config_and_inputs
a : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
lowerCamelCase : int =(
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
lowerCamelCase : List[str] ={'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
lowerCamelCase : Tuple =False
lowerCamelCase : List[str] =False
lowerCamelCase : Union[str, Any] =False
lowerCamelCase : str =False
lowerCamelCase : str =False
def __a ( self ) -> str:
a : Dict = MaskFormerSwinModelTester(self )
a : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> Optional[Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __a ( self ) -> Tuple:
return
def __a ( self ) -> int:
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def __a ( self ) -> Optional[Any]:
a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase_ )
@unittest.skip("Swin does not use inputs_embeds" )
def __a ( self ) -> Optional[Any]:
pass
@unittest.skip("Swin does not support feedforward chunking" )
def __a ( self ) -> str:
pass
def __a ( self ) -> List[str]:
a, a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Optional[int] = model_class(UpperCamelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase_ , nn.Linear ) )
def __a ( self ) -> Optional[Any]:
a, a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[str] = model_class(UpperCamelCase_ )
a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : List[Any] = [*signature.parameters.keys()]
a : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_ )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def __a ( self ) -> List[str]:
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def __a ( self ) -> Tuple:
pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size) -> None:
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], )
    def test_hidden_states_output(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def __a ( self ) -> List[str]:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __a ( self ) -> Any:
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __a ( self ) -> str:
pass
    def test_model_outputs_equivalence(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t
        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (list, tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1E-5), msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f""" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"""
                            f""" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"""
                            f""" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."""
                        ), )
            recursive_check(tuple_output, dict_output)
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self) -> None:
        self.model_tester = MaskFormerSwinModelTester(self)
    def test_backbone_outputs(self) -> None:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
| 713
|
"""simple docstring"""
def solution(pence: int = 200) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
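    # Illustrative extra checks: with a 1p coin available there is exactly one
    # way to make 0p and one way to make 1p.
    assert solution(0) == 1
    assert solution(1) == 1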
| 31
| 0
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    '''simple docstring'''
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True)
    special_keys = ["key_proj", "value_proj", "query_proj"]
    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")
        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model
        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute
            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"""{attribute} is initialized.""")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"""{attribute} is initialized""")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break
            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(old_attribute)]
            else:
                model = getattr(model, attribute)
                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"""{old_model} does not have {old_attribute}""")
                    old_model = getattr(old_model, old_attribute)
        if not is_key_init:
            raise ValueError(f"""{key} was not correctly initialized!""")
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 714
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]
    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ["transformers", "torch", "note_seq"])
    @classmethod
    def from_config(cls, *args, **kwargs) -> None:
        requires_backends(cls, ["transformers", "torch", "note_seq"])
    @classmethod
    def from_pretrained(cls, *args, **kwargs) -> None:
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 31
| 0
|
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    '''simple docstring'''
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)
if __name__ == "__main__":
import doctest
doctest.testmod()
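    # Illustrative conversions (quick sanity checks):
    assert decimal_to_binary(0) == "0b0"
    assert decimal_to_binary(5) == "0b101"
    assert decimal_to_binary(-2) == "-0b10"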
| 715
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
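    # Expected outcome (illustrative): with no gates applied the qubit stays in
    # |0>, so all 1000 shots land in state '0', e.g. {'0': 1000}.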
| 31
| 0
|
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs) -> None:
            pass
@is_pipeline_test
@require_vision
class ZeroShotImageClassificationPipelineTests(unittest.TestCase):
@require_torch
    def test_small_model_pt(self) -> None:
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(output),
            [
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
                [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@require_tf
    def test_small_model_tf(self) -> None:
        image_classifier = pipeline(
            model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf")
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["a", "b", "c"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
        )
        output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                    {"score": 0.333, "label": ANY(str)},
                ],
            ]
            * 5,
        )
@slow
@require_torch
    def test_large_model_pt(self) -> None:
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
@slow
@require_tf
    def test_large_model_tf(self) -> None:
        image_classifier = pipeline(
            task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf")
        # This is an image of 2 cats with remotes and no planes
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        output = image_classifier(image, candidate_labels=["cat", "plane", "remote"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.511, "label": "remote"},
                {"score": 0.485, "label": "cat"},
                {"score": 0.004, "label": "plane"},
            ],
        )
        output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2)
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.511, "label": "remote"},
                    {"score": 0.485, "label": "cat"},
                    {"score": 0.004, "label": "plane"},
                ],
            ]
            * 5,
        )
| 716
|
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count
def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
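# Minimal usage sketch (illustrative, separate from the measurement script below):
#   data = [3, 1, 2]
#   comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
#   `data` is now [1, 2, 3]; `comparisons` varies with the random pivots chosen.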
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)
outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
    '''No of Comparisons for 100 elements selected from a standard normal distribution'''
    '''is :'''
)
print(z)
| 31
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/facebook/data2vec-text-base/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
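# Usage sketch (illustrative, using the names defined above):
#   config = Data2VecTextConfig(vocab_size=30522, hidden_size=768)
#   onnx_config = Data2VecTextOnnxConfig(config)
#   list(onnx_config.inputs)  -> ['input_ids', 'attention_mask']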
| 717
|
"""simple docstring"""
import base64
def ascii85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8"))
def ascii85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded).decode("utf-8")
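# Round-trip example (illustrative):
#   ascii85_encode("Hello")    -> b'87cURDZ'
#   ascii85_decode(b'87cURDZ') -> 'Hello'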
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    '''simple docstring'''
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"""1/{temp + 1}""" if series else "1")
    return series
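# Example (illustrative): harmonic_series("3") -> ['1', '1/2', '1/3']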
if __name__ == "__main__":
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 718
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , remove_space=True , keep_accents=False , bos_token="[CLS]" , eos_token="[SEP]" , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs, )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 31
| 0
|
from copy import deepcopy
class FenwickTree:
    def __init__(self, arr=None, size=None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")
    def init(self, arr) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]
    def get_array(self) -> list:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr
    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))
    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))
    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)
    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))
    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result
    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)
    def get(self, index: int) -> int:
        return self.query(index, index + 1)
    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
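# Quick usage sketch (illustrative):
#   f = FenwickTree([1, 2, 3, 4, 5])
#   f.prefix(3)    -> 6   (1 + 2 + 3)
#   f.add(2, 10)   # underlying array becomes [1, 2, 13, 4, 5]
#   f.query(1, 4)  -> 19  (2 + 13 + 4)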
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None) -> list:
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=1 , padding_value=0.0 , sampling_rate=1_6000 , do_normalize=True , num_mel_bins=80 , hop_length=16 , win_length=64 , win_function="hann_window" , fmin=80 , fmax=7600 , mel_floor=1E-10 , return_attention_mask=True , ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self) -> dict:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False) -> list:
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
    def prepare_inputs_for_target(self, equal_length=False, numpify=False) -> list:
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor
    def setUp(self) -> None:
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector) -> None:
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1E-3))
    def test_call(self) -> None:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_zero_mean_unit_variance_normalization_np(self) -> None:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1E-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization(self) -> None:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self) -> None:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np")
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self) -> None:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np")
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np")
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))
    def test_double_precision_pad(self) -> None:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self) -> None:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1E-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1E-3))
    def test_batch_feature_target(self) -> None:
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_batch_feature_target_pt(self) -> None:
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins))
@require_torch
    def test_padding_accepts_tensors_target_pt(self) -> None:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1E-2)
    def test_attention_mask_target(self) -> None:
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation_target(self) -> None:
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np")
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self) -> None:
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
             3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
             2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
             4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
             7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
             4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 9_3680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1E-6))
    def test_integration_target(self) -> None:
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1E-4))
| 31
| 0
|
import colorsys
from PIL import Image  # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    '''simple docstring'''
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    '''simple docstring'''
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True, ) -> Image.Image:
    '''simple docstring'''
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
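# Illustrative call with the defaults above: render the classic full figure.
#   img = get_image(image_width=800, image_height=600, max_step=50)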
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 720
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example: dict) -> dict:
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
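# Note: "ratio_char_token" above is characters per token, i.e. a rough measure
# of how aggressively the tokenizer compresses this corpus (illustrative
# reading; typical code tokenizers land somewhere around 3-4 characters/token).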
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
| 31
| 0
|
"""simple docstring"""
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
    if args.host != "localhost":
        if args.instance != "V100:1" or args.provider != "cheapest":
            raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
            name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
        )
    else:
        cluster = rh.cluster(
            name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 721
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer
MT5Tokenizer = T5Tokenizer
if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast
MT5TokenizerFast = T5TokenizerFast
_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()['''__file__'''],
        _import_structure,
        extra_objects={'''MT5Tokenizer''': MT5Tokenizer, '''MT5TokenizerFast''': MT5TokenizerFast},
        module_spec=__spec__,
    )
| 31
| 0
|
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
    '''--preprocessing_num_workers''', type=int, default=4, help='''The number of processes to use for preprocessing.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        '''You are instantiating a new tokenizer from scratch. This is not supported by this script. '''
        '''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
    )

logger.info('''Training/evaluation parameters %s''', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = '''temp_engine/bert-fp32.engine'''
if args.fp16:
    engine_name = '''temp_engine/bert-fp16.engine'''
if args.int8:
    engine_name = '''temp_engine/bert-int8.engine'''
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    """Run one TensorRT inference pass and return (outputs, infer_time)."""
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.

# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.

column_names = raw_datasets['''validation'''].column_names

question_column_name = '''question''' if '''question''' in column_names else column_names[0]
context_column_name = '''context''' if '''context''' in column_names else column_names[1]
answer_column_name = '''answers''' if '''answers''' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == '''right'''

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the '''
        f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, so we strip it.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['''validation''']

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='''Running tokenizer on validation dataset''',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    """Match the start/end logits to answer spans in the original contexts."""
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        """Size in bytes of the device buffer needed for one engine binding."""
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers (start logits and end logits)
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(F''' Num examples = {len(eval_dataset)}''')
logger.info(F''' Batch size = {args.per_device_eval_batch_size}''')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None

    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'''Evaluation metrics: {eval_metric}''')
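# Example invocation (script name and argument values illustrative, not from this file):
#   python evaluate_trt_qa.py --onnx_model_path model.onnx --output_dir ./out \
#       --tokenizer_name bert-base-uncased --dataset_name squad --fp16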
| 700
|
"""simple docstring"""
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return `min_val` if `option` is True, otherwise `max_val`."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be <= max_val)")
    return min_val if option else max_val
def get_avg(number_1: int, number_2: int) -> int:
    """Return the integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)
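# e.g. get_avg(16, 19) == 17, the midpoint step used by the binary search below.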
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Binary-search for `to_guess` between `lower` and `higher`, printing each midpoint tried."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument values must satisfy lower < higher")

    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value"
        )

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher

    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"""guess the number : {last_numbers[-1]}""")
    print(f"""details : {last_numbers!s}""")
def main() -> None:
    """Ask the user for bounds and a target value, then run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())

    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
| 31
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1) used by Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
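# e.g. ucal(1.5, 2) == 1.5 * (1.5 - 1) == 0.75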
def main() -> None:
    """Read sample points from stdin and evaluate Newton's forward interpolation."""
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"""the value at {value} is {summ}""")
if __name__ == "__main__":
main()
| 701
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)
ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)
    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)
    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)
    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)
    @classmethod
    def _from_pretrained(cls, model_id, use_auth_token=None, revision=None, force_download=False,
                         cache_dir=None, file_name=None, provider=None, sess_options=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)
    @classmethod
    def from_pretrained(cls, model_id, force_download=True, use_auth_token=None, cache_dir=None, **model_kwargs):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
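# Minimal usage sketch (paths and input shape illustrative):
#   model = OnnxRuntimeModel.from_pretrained("path/to/onnx_model_dir", file_name="model.onnx")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))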
| 31
| 0
|
"""simple docstring"""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
"google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
"google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"
    def __init__(self, vocab_size=49408, hidden_size=512, intermediate_size=2048, num_hidden_layers=12, num_attention_heads=8, max_position_embeddings=16, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, pad_token_id=0, bos_token_id=49406, eos_token_id=49407, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"
    def __init__(self, hidden_size=768, intermediate_size=3072, num_hidden_layers=12, num_attention_heads=12, num_channels=3, image_size=768, patch_size=32, hidden_act="quick_gelu", layer_norm_eps=1e-5, attention_dropout=0.0, initializer_range=0.02, initializer_factor=1.0, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, return_dict=True, **kwargs):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        """Instantiate an OwlViTConfig from separate text and vision config dicts."""
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(self, processor: "ProcessorMixin", batch_size: int = -1, seq_length: int = -1, framework: Optional["TensorType"] = None) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
| 702
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    """Download `num_class_images` regularization images for `class_prompt` via LAION retrieval."""
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"""{class_data_dir}/images""", exist_ok=True)
    if len(list(Path(f"""{class_data_dir}/images""").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"""{class_data_dir}/caption.txt""", "w") as f1, open(f"""{class_data_dir}/urls.txt""", "w") as f2, open(
        f"""{class_data_dir}/images.txt""", "w") as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"""{class_data_dir}/images/{total}.jpg""", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    """Parse the command-line arguments for class-image retrieval."""
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
a : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
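# Example invocation (values illustrative):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_data --num_class_images 200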
| 31
| 0
|
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""")
            self.first_batch = False
        return x * self.a + self.b
def get_dataloaders(accelerator, batch_size: int = 16):
    """Build MRPC train/eval dataloaders from the bundled sample CSVs."""
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length"
        )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function,
        batched=True,
        remove_columns=["sentence1", "sentence2", "label"],
    )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
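# Minimal usage sketch (assumes the sample MRPC CSVs referenced above exist):
#   from accelerate import Accelerator
#   train_dataloader, eval_dataloader = get_dataloaders(Accelerator(), batch_size=16)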
| 703
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"""accelerate=={args.accelerate_version}"""

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
    if args.debug:
        print(f"""Running {' '.join(cmd)}""")
        return
    subprocess.run(cmd)
print("Successfully setup pod." )
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
| 31
| 0
|
"""simple docstring"""
def text_justification(word: str, max_width: int) -> list:
    """Pack `word`'s words into fully justified lines of exactly `max_width` characters."""
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
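# Worked example:
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']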
if __name__ == "__main__":
from doctest import testmod
testmod()
| 704
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )

        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for `generate`."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
a : Optional[int] = src_lang
a : int = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
def __a ( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def __a ( self ) -> str:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting (legacy: suffix=[eos, code]; default: prefix=[code], suffix=[eos])."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting; mirrors `set_src_lang_special_tokens`."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )
    def save_vocabulary(self, save_directory: str, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
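# Minimal usage sketch (checkpoint name as referenced above; text illustrative):
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   batch = tokenizer("Hello world!", return_tensors="pt")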
| 31
| 0
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
def __a ( self ) -> Any:
a : List[Any] = self.scheduler_classes[0]
a : int = self.get_scheduler_config()
a : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
a : Any = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase__ )
a : Dict = scheduler.timesteps
for i, timestep in enumerate(lowerCAmelCase__ ):
if i == len(lowerCAmelCase__ ) - 1:
a : Tuple = -1
else:
a : Any = timesteps[i + 1]
a : Optional[Any] = scheduler.previous_timestep(lowerCAmelCase__ )
a : Union[str, Any] = prev_t.item()
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def __a ( self ) -> Any:
a : Union[str, Any] = self.scheduler_classes[0]
a : Any = self.get_scheduler_config()
a : Optional[Any] = scheduler_class(**lowerCAmelCase__ )
a : Dict = [100, 87, 50, 51, 0]
with self.assertRaises(lowerCAmelCase__ , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=lowerCAmelCase__ )
def __a ( self ) -> int:
a : Tuple = self.scheduler_classes[0]
a : int = self.get_scheduler_config()
a : List[Any] = scheduler_class(**lowerCAmelCase__ )
a : Union[str, Any] = [100, 87, 50, 1, 0]
a : Optional[Any] = len(lowerCAmelCase__ )
with self.assertRaises(lowerCAmelCase__ , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase__ , timesteps=lowerCAmelCase__ )
def __a ( self ) -> Optional[Any]:
a : str = self.scheduler_classes[0]
a : Optional[Any] = self.get_scheduler_config()
a : List[str] = scheduler_class(**lowerCAmelCase__ )
a : Optional[Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase__ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase__ )
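# Hedged sketch (ours, not part of the test suite): the values asserted in test_variance
# follow from the DDPM "fixed_small" posterior variance,
# beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t), using the linear beta schedule from
# get_scheduler_config above.
def _ddpm_fixed_small_variance_sketch(t, num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02):
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1 - alpha_prod_t_prev) / (1 - alpha_prod_t)
# _ddpm_fixed_small_variance_sketch(0) ~= 0.0 and _ddpm_fixed_small_variance_sketch(999) ~= 0.02,
# matching the assertions in test_variance.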
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2
    @register_to_config
    def __init__( self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None) -> None:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input( self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step( self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample )
    def step_correct( self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True, ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample )
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
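# Hedged usage sketch (illustration only, not shipped with this module): the scheduler
# above is driven by the stochastic sampler of Karras et al. (2022). `denoise` stands in
# for a sigma-conditioned model; its name and trivial zero prediction are ours.
def _karras_ve_sampling_sketch():
    def denoise(x, sigma):
        return torch.zeros_like(x)
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(num_inference_steps=10)
    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        # increase noise, denoise at the higher sigma, then take an Euler step down
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        model_output = denoise(sample_hat, sigma_hat)
        sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
    return sample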
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_flax
class VisionTextDualEncoderMixin:
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
pass
def __a ( self ) -> Dict:
pass
def __a ( self ) -> Tuple:
pass
    def assert_almost_equals(self, a, b, tol):
        diff = np.abs(a - b).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Union[str, Any]:
a : Union[str, Any] = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
a : List[Any] = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
a : Optional[Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Tuple:
a, a : Optional[int] = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
a : Optional[Any] = {"vision_model": vision_model, "text_model": text_model}
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
a : Optional[Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Tuple:
a, a : int = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
a : Tuple = {"vision_model": vision_model, "text_model": text_model}
a : str = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
a : List[Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
a : Tuple = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
a : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
a : List[Any] = model(input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase )
a : List[Any] = after_output[0]
a : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-3 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]:
a, a : Optional[Any] = self.get_vision_text_model(__UpperCamelCase , __UpperCamelCase )
a : Union[str, Any] = {"vision_model": vision_model, "text_model": text_model}
a : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**__UpperCamelCase )
a : Tuple = model(
input_ids=__UpperCamelCase , pixel_values=__UpperCamelCase , attention_mask=__UpperCamelCase , output_attentions=__UpperCamelCase )
a : int = output.vision_model_output.attentions
self.assertEqual(len(__UpperCamelCase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a : List[str] = to_atuple(vision_model.config.image_size )
a : List[str] = to_atuple(vision_model.config.patch_size )
a : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a : Optional[int] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
a : Optional[int] = output.text_model_output.attentions
self.assertEqual(len(__UpperCamelCase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
pt_model.to(__UpperCamelCase )
pt_model.eval()
# prepare inputs
a : Optional[int] = inputs_dict
a : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
a : str = pt_model(**__UpperCamelCase ).to_tuple()
a : Optional[Any] = fx_model(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4E-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(__UpperCamelCase )
a : str = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_pt=__UpperCamelCase )
a : int = fx_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output.numpy() , 4E-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(__UpperCamelCase )
a : Union[str, Any] = VisionTextDualEncoderModel.from_pretrained(__UpperCamelCase , from_flax=__UpperCamelCase )
pt_model_loaded.to(__UpperCamelCase )
pt_model_loaded.eval()
with torch.no_grad():
a : List[str] = pt_model_loaded(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(__UpperCamelCase , pt_output_loaded.numpy() , 4E-2 )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : Any = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
a : Any = VisionTextDualEncoderModel(__UpperCamelCase )
a : Optional[Any] = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
a : Union[str, Any] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , __UpperCamelCase )
a : Tuple = fx_state
self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : Dict = VisionTextDualEncoderConfig.from_vision_text_configs(__UpperCamelCase , __UpperCamelCase )
a : List[str] = VisionTextDualEncoderModel(__UpperCamelCase )
a : Optional[int] = FlaxVisionTextDualEncoderModel(__UpperCamelCase )
a : str = load_flax_weights_in_pytorch_model(__UpperCamelCase , fx_model.params )
self.check_pt_flax_equivalence(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def __a ( self ) -> Optional[Any]:
a : Dict = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**__UpperCamelCase )
def __a ( self ) -> List[Any]:
a : Union[str, Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**__UpperCamelCase )
def __a ( self ) -> List[str]:
a : Optional[int] = self.prepare_config_and_inputs()
self.check_save_load(**__UpperCamelCase )
def __a ( self ) -> Optional[int]:
a : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**__UpperCamelCase )
@is_pt_flax_cross_test
def __a ( self ) -> Optional[int]:
a : str = self.prepare_config_and_inputs()
a : Dict = config_inputs_dict.pop("vision_config" )
a : Dict = config_inputs_dict.pop("text_config" )
a : Any = config_inputs_dict
self.check_equivalence_pt_to_flax(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
self.check_equivalence_flax_to_pt(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
@slow
def __a ( self ) -> List[Any]:
a, a : List[str] = self.get_pretrained_model_and_inputs()
a : List[str] = model_a(**__UpperCamelCase )
a : int = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(__UpperCamelCase )
a : Optional[int] = FlaxVisionTextDualEncoderModel.from_pretrained(__UpperCamelCase )
a : Dict = model_a(**__UpperCamelCase )
a : Optional[int] = after_outputs[0]
a : Any = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__UpperCamelCase , 1E-5 )
@require_flax
class FlaxViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def __a ( self ) -> List[Any]:
a : Union[str, Any] = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , )
a : List[Any] = 13
a : List[str] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : List[Any] = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : List[Any] = random_attention_mask([batch_size, 4] )
a : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
a : Dict = FlaxViTModel(__UpperCamelCase )
a : Optional[int] = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def __a ( self ) -> List[Any]:
a : str = FlaxViTModelTester(self )
a : Any = FlaxBertModelTester(self )
a : int = vit_model_tester.prepare_config_and_inputs()
a : List[Any] = bert_model_tester.prepare_config_and_inputs()
a, a : Optional[Any] = vision_config_and_inputs
a, a, a, a : int = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class FlaxCLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase):
def __a ( self ) -> Optional[Any]:
a : Any = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"hf-internal-testing/tiny-random-clip" , "hf-internal-testing/tiny-bert" , vision_from_pt=__UpperCamelCase , text_from_pt=__UpperCamelCase , )
a : Optional[int] = 13
a : List[Any] = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
a : int = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
a : Dict = random_attention_mask([batch_size, 4] )
a : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
return model, inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
a : Optional[Any] = FlaxCLIPVisionModel(__UpperCamelCase )
a : List[str] = FlaxBertModel(__UpperCamelCase )
return vision_model, text_model
def __a ( self ) -> int:
a : str = FlaxCLIPVisionModelTester(self )
a : int = FlaxBertModelTester(self )
a : List[str] = clip_model_tester.prepare_config_and_inputs()
a : Dict = bert_model_tester.prepare_config_and_inputs()
a, a : str = vision_config_and_inputs
a, a, a, a : List[str] = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class FlaxVisionTextDualEncoderIntegrationTest(unittest.TestCase):
@slow
def __a ( self ) -> List[Any]:
a : List[str] = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian" , logit_scale_init_value=1.0 )
a : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
a : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
a : int = processor(
text=["una foto di un gatto", "una foto di un cane"] , images=__UpperCamelCase , padding=__UpperCamelCase , return_tensors="np" )
a : Optional[Any] = model(**__UpperCamelCase )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
a : Dict = np.array([[1.2_284_727, 0.3_104_122]] )
self.assertTrue(np.allclose(outputs.logits_per_image , __UpperCamelCase , atol=1E-3 ) )
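# Hedged sketch of the quantity checked in the integration test above: a dual encoder
# scores images against texts as logits = logit_scale * image_embeds @ text_embeds.T on
# L2-normalized embeddings (the model stores the temperature in log space and
# exponentiates it). Shapes and names below are ours, for illustration only.
def _dual_encoder_logits_sketch():
    rng = np.random.default_rng(0)
    image_embeds = rng.standard_normal((1, 512))
    text_embeds = rng.standard_normal((2, 512))
    image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
    text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)
    logit_scale = np.exp(1.0)  # assuming logit_scale_init_value=1.0 as in the test above
    logits_per_image = logit_scale * image_embeds @ text_embeds.T  # shape (1, 2)
    logits_per_text = logits_per_image.T  # shape (2, 1)
    return logits_per_image, logits_per_text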
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)
_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCAmelCase__ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
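# Hedged usage sketch (comments only, ours, not part of the builder): the CsvConfig
# fields above are forwarded to pandas.read_csv via pd_read_csv_kwargs, so they can be
# passed straight through load_dataset. The file contents below are placeholders.
#
#     import os, tempfile
#     from datasets import load_dataset
#
#     with tempfile.TemporaryDirectory() as tmp_dir:
#         path = os.path.join(tmp_dir, "data.csv")
#         with open(path, "w", encoding="utf-8") as f:
#             f.write("a;b\n1;2\n3;4\n")
#         ds = load_dataset("csv", data_files={"train": path}, sep=";", split="train")
#         assert ds.column_names == ["a", "b"]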
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    model_type = "vivit"
    def __init__( self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs, ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
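# Hedged sketch (ours, not part of the config module): with the defaults above, ViViT
# splits a num_frames x image_size x image_size video into tubelets of tubelet_size,
# which fixes the transformer sequence length before the CLS token is added.
def _vivit_num_tubelets_sketch(config: VivitConfig = None) -> int:
    config = config or VivitConfig()
    return (
        (config.num_frames // config.tubelet_size[0])
        * (config.image_size // config.tubelet_size[1])
        * (config.image_size // config.tubelet_size[2])
    )
# _vivit_num_tubelets_sketch() == (32 // 2) * (224 // 16) * (224 // 16) == 3136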
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)
    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)
    def test_save_load_local(self):
        self._test_save_load_local()
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
        assert_mean_pixel_difference(image, expected_image)
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np", )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
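# Hedged sketch of the measurement pattern the helpers above follow (comments only,
# since it needs a CUDA device): reset the peak-memory counters, run the stage under
# test, then read CUDA's high-water mark.
#
#     _start_torch_memory_measurement()
#     output = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, output_type="np")
#     peak_bytes = torch.cuda.max_memory_allocated()
#     assert peak_bytes < 13 * 10**9  # stage I of IF is expected to stay under ~13 GB here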
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False
    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True
    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"
    # Import module
    import datasets.filesystems
    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)
    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
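# Hedged sketch (ours, not datasets' implementation) of the behaviour
# test_extract_path_from_uri relies on: remote URIs lose their scheme, local paths
# pass through unchanged.
from urllib.parse import urlparse
def _extract_path_from_uri_sketch(dataset_path: str) -> str:
    if "://" in dataset_path:
        parsed = urlparse(dataset_path)
        return parsed.netloc + parsed.path
    return dataset_path
assert _extract_path_from_uri_sketch("s3://mock-s3-bucket") == "mock-s3-bucket"
assert _extract_path_from_uri_sketch("./local/path") == "./local/path"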
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
a : Optional[int] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : str = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Tuple:
a : Tuple = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : Dict = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Dict = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : List[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Any = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> int:
# pass variant but use the non-variant filenames
a : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
a : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Any = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : Union[str, Any] = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
# pass variant but use the non-variant filenames
a : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
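# Hedged sketch (ours, not diffusers' exact implementation) of the rule the tests above
# encode: every torch ".bin" weight needs a ".safetensors" sibling, transformers-style
# "pytorch_model" files map to "model", and a variant such as "fp16" adds a ".fp16"
# component that is ignored when matching (so non-variant safetensors also satisfy a
# variant query).
import os
def _is_safetensors_compatible_sketch(filenames, variant=None):
    def strip_variant(name):
        return name.replace(f".{variant}.", ".") if variant else name
    bin_files = {strip_variant(f) for f in filenames if f.endswith(".bin")}
    sf_files = {strip_variant(f) for f in filenames if f.endswith(".safetensors")}
    for bin_file in bin_files:
        folder, fname = os.path.split(bin_file)
        if fname.startswith("pytorch_model"):
            fname = fname.replace("pytorch_model", "model")
        expected = os.path.join(folder, fname.replace(".bin", ".safetensors"))
        if expected not in sf_files:
            return False
    return True
assert _is_safetensors_compatible_sketch(["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"])
assert not _is_safetensors_compatible_sketch(["unet/diffusion_pytorch_model.fp16.bin"], variant="fp16")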
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
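# Hedged usage sketch (comments only, ours): the _LazyModule wiring above means the
# heavy torch-backed submodule is only imported on first attribute access, e.g.
#
#     from transformers import NezhaConfig   # cheap: no torch-dependent module loaded yet
#     from transformers import NezhaModel    # first access triggers importing modeling_nezha
#     model = NezhaModel(NezhaConfig())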
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest", )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels
        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype, )
    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
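# Hedged usage sketch (ours, not part of the module): initialize and apply the ResNet
# block above on NHWC activations with a pooled time embedding; the shapes are
# illustrative only.
def _resnet_block_sketch():
    block = FlaxResnetBlock2D(in_channels=32, out_channels=32, dropout_prob=0.0)
    hidden_states = jnp.zeros((1, 16, 16, 32))  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128))  # pooled/projected time embedding
    params = block.init(jax.random.PRNGKey(0), hidden_states, temb)
    out = block.apply(params, hidden_states, temb, deterministic=True)
    assert out.shape == hidden_states.shape
    return out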
"""simple docstring"""
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class StableDiffusionControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        torch.manual_seed(0)
        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal(m.weight)
                m.bias.data.fill_(1.0)
        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet1.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), )
        controlnet2.controlnet_down_blocks.apply(init_weights)
        torch.manual_seed(0)
a : Optional[int] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , )
torch.manual_seed(0 )
a : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
a : int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
a : Optional[int] = CLIPTextModel(UpperCamelCase__ )
a : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
a : List[str] = MultiControlNetModel([controlneta, controlneta] )
a : Optional[int] = {
"unet": unet,
"controlnet": controlnet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImgaImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
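# Hedged usage sketch (not part of the module): once registered on the `accelerate`
# CLI, this subcommand is typically invoked as, e.g.:
#
#   accelerate test --config_file path/to/default_config.yaml
#
# which launches test_script.py through `accelerate-launch` with that config.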
"""simple docstring"""
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """Builds a GHZ-style circuit on `qubits` qubits and returns the measured
    counts from 1000 shots on Aer's simulator."""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 is in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding a CX (CNOT) gate to entangle qubit i with qubit i - 1
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits.
    # Measuring any one qubit now collapses the superposition of the others
    # into the same state as the measured one.
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {quantum_entanglement(3)}''')
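# Sanity-check sketch (assumes qiskit with the Aer simulator is installed): for a
# GHZ-style circuit the counts should land only on the all-zeros and all-ones
# bitstrings, split roughly evenly over the 1000 shots, e.g.:
#
#   counts = quantum_entanglement(3)
#   assert set(counts) <= {"000", "111"}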
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
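# Worked example (assumption: the formula expects molar mass in kg/mol): for
# nitrogen at 300 K, molar_mass = 0.028 kg/mol gives
#   vrms = (3 * 8.3144598 * 300 / 0.028) ** 0.5 ≈ 517 m/s,
# which matches the textbook value for N2 at room temperature.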
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0_681_762, 0.10_894_451, 0.06_772_504],
                    [-0.06_423_668, 0.02_366_615, 0.04_329_344],
                    [-0.06_057_295, 0.09_974_135, -0.00_070_584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
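# A minimal sketch of a user-defined criterion in the same spirit as the ones
# tested above (the StoppingCriteria base class lives in transformers.generation;
# `stop_token_id` is a hypothetical parameter introduced here for illustration):
#
# from transformers.generation import StoppingCriteria
#
# class StopOnToken(StoppingCriteria):
#     def __init__(self, stop_token_id):
#         self.stop_token_id = stop_token_id
#
#     def __call__(self, input_ids, scores, **kwargs):
#         # stop as soon as every sequence in the batch ends with the stop token
#         return bool((input_ids[:, -1] == self.stop_token_id).all())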
"""simple docstring"""
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes > 3 are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 1_0001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F'''{solution() = }''')
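# Quick sanity checks (small, hand-verifiable cases): the 1st prime is 2 and the
# 6th prime is 13 (2, 3, 5, 7, 11, 13), so
#   assert solution(1) == 2
#   assert solution(6) == 13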
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Returns the number of different ways to make `pence` pence using any
    number of 1, 2, 5, 10, 20, 50, 100 and 200 pence coins."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
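# Hand-checkable example: for pence = 5 the combinations are {5}, {2, 2, 1},
# {2, 1, 1, 1} and {1, 1, 1, 1, 1}, so
#   assert solution(5) == 4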
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 25_0004
RO_CODE = 25_0020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)

    def test_full_tokenizer(self):
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ]
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."]
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
a : Tuple = {"input_ids": [[25_0004, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [25_0004, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_0004, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(expected_encoding=a, model_name="facebook/mbart-large-50", revision="d3913889c59cd5c9e456b269c376325eabad57e2")  # `a` is the expected-encoding dict literal assigned above
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def check_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 25_0001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 25_0004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 25_0020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 25_0038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [25_0053, 25_0001])

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt"
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[25_0004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 25_0001,
            },
        )
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SpectrogramDiffusionPipeline(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=3_0522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=1335_3718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measures the first qubit of a freshly initialised register and returns
    the counts from 1000 shots on Aer's simulator."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
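# Expected behaviour sketch: with no gates applied, the qubit stays in |0>, so
# all 1000 shots should land on the "0" outcome, e.g.:
#
#   counts = single_qubit_measure(1, 1)
#   assert counts == {"0": 1000}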
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
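# Illustration of the special-token layout the two helpers above produce
# (hypothetical ids; in the standard BERT-style vocab [CLS]=101 and [SEP]=102):
#
#   build_inputs_with_special_tokens([7, 8], [9])      -> [101, 7, 8, 102, 9, 102]
#   create_token_type_ids_from_sequences([7, 8], [9])  -> [0, 0, 0, 0, 1, 1]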
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """Sorts a[start:end + 1] in place and returns the number of comparisons."""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """Partitions a[start:end + 1] around a random pivot; returns the final
    pivot index and the number of comparisons made."""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
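# Verification sketch: after _in_place_quick_sort returns, M is sorted in place,
# and with random pivots the comparison count for n = 100 is typically on the
# order of n * log2(n) ≈ 665 (the O(n^2) worst case is unlikely):
#
#   assert all(M[i] <= M[i + 1] for i in range(len(M) - 1))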
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # nearest-neighbour upsample by 2x, then refine with a 3x3 conv
        hidden_states = jax.image.resize(hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest")
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # project the time embedding and broadcast-add it over the spatial dims
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
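# Minimal shape-check sketch for the residual block above (assumes the class
# names used in this file; Flax convolutions expect NHWC inputs):
#
#   rng = jax.random.PRNGKey(0)
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   hidden_states = jnp.ones((1, 8, 8, 32))   # (batch, height, width, channels)
#   temb = jnp.ones((1, 128))                 # time-step embedding
#   params = block.init(rng, hidden_states, temb)
#   out = block.apply(params, hidden_states, temb)  # -> shape (1, 8, 8, 64)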
"""simple docstring"""
import base64


def base85_encode(string: str) -> bytes:
    """Encodes a UTF-8 string with Ascii85 and returns the raw bytes."""
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decodes Ascii85-encoded bytes back to a UTF-8 string."""
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
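    # Round-trip sanity check: Ascii85 is lossless, so decoding the encoding of
    # any UTF-8 string must return the original text.
    assert base85_decode(base85_encode("Hello, world!")) == "Hello, world!"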
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
    return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
] )
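# Hedged wiring sketch (hypothetical paths and batch size; assumes a HuggingFace
# tokenizer with an .encode() method): the pieces above are meant to be combined
# roughly as follows.
#
#   from torch.utils.data import DataLoader
#
#   labels = get_mmimdb_labels()
#   dataset = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=80)
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#   text, mask, img, img_start, img_end, tgt = next(iter(loader))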
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=1_6000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
def __a ( self ) -> Optional[Any]:
a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = ["longest", "max_length", "do_not_pad"]
a : Tuple = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def __a ( self ) -> str:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        lengths = range(800 , 1400 , 200 )
        speech_inputs = [floats_list((1, x) )[0] for x in lengths]
        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths , paddings ):
            processed = feat_extract(speech_inputs , max_length=max_length , padding=padding )
            input_values = processed.input_values
            self._check_zero_mean_unit_variance(input_values[0][:800] )
            self._check_zero_mean_unit_variance(input_values[1][:1000] )
            self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def __a ( self ) -> Dict:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="max_length" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1] )
        self._check_zero_mean_unit_variance(input_values[2] )
    def __a ( self ) -> Dict:
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=1000 , padding="longest" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000) )
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        processed = feat_extract(
            speech_inputs , truncation=True , max_length=2000 , padding="longest" , return_tensors="np" )
        input_values = processed.input_values
        self._check_zero_mean_unit_variance(input_values[0, :800] )
        self._check_zero_mean_unit_variance(input_values[1, :1000] )
        self._check_zero_mean_unit_variance(input_values[2] )
        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> List[str]:
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
    def __a ( self ) -> Tuple:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
        np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs , padding=True , return_tensors="np" ).input_values
        self.assertTrue(input_values.ndim == 3 )
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
        # Test not batched input
        encoded_sequences_a = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
        encoded_sequences_b = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
        self.assertTrue(np.allclose(encoded_sequences_a , encoded_sequences_b , atol=1E-3 ) )
        # Test batched
        encoded_sequences_a = feature_extractor(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_b = feature_extractor(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x) )[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs )
        encoded_sequences_a = feature_extractor(speech_inputs , return_tensors="np" ).input_values
        encoded_sequences_b = feature_extractor(np_speech_inputs , return_tensors="np" ).input_values
        for enc_seq_a, enc_seq_b in zip(encoded_sequences_a , encoded_sequences_b ):
            self.assertTrue(np.allclose(enc_seq_a , enc_seq_b , atol=1E-3 ) )
    def __a ( self ) -> str:
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(x ) == len(y ) for x, y in zip(speech_inputs , processed_features[input_name] ) ) )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def __a ( self ) -> Tuple:
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True )
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def __a ( self ) -> Optional[Any]:
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        input_np = feat_extract.pad(processed_features , padding="longest" , return_tensors="np" )[input_name]
        input_pt = feat_extract.pad(processed_features , padding="longest" , return_tensors="pt" )[input_name]
        self.assertTrue(abs(input_np.astype(np.float32 ).sum() - input_pt.numpy().astype(np.float32 ).sum() ) < 1E-2 )
    def __a ( self ) -> Union[str, Any]:
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed = feat_extract.pad(processed , padding="longest" , return_tensors="np" )
        self.assertIn("attention_mask" , processed )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , input_lengths )
    def __a ( self ) -> Union[str, Any]:
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict )
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x ) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs} )
        max_length = min(input_lengths )
        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!
        processed_pad = feat_extract.pad(
            processed , padding="max_length" , max_length=max_length , truncation=True , return_tensors="np" )
        self.assertIn("attention_mask" , processed_pad )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
    def _load_datasamples( self , num_samples ) -> list:
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
        # automatic decoding with librispeech
        speech_samples = ds.sort("id" ).select(range(num_samples ) )[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
def __a ( self ) -> Union[str, Any]:
# fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 9_3680) )
        self.assertTrue(torch.allclose(input_values[0, :30] , EXPECTED_INPUT_VALUES , atol=1E-6 ) )
def __a ( self ) -> Union[str, Any]:
# fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
        input_speech = self._load_datasamples(1 )
        feature_extractor = SpeechTaFeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech , return_tensors="pt" ).input_values
        self.assertEqual(input_values.shape , (1, 366, 80) )
        self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
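# --- Added illustrative sketch (not part of the original test file; `eps` below is
# an assumed stabilizer, not a value taken from the extractor) ---
# The `_check_zero_mean_unit_variance` assertions above boil down to this
# per-utterance normalization along the time axis:
#
#     def zero_mean_unit_var(x, eps=1e-7):
#         return (x - x.mean(axis=0)) / np.sqrt(x.var(axis=0) + eps)
#
#     demo = np.random.rand(800).astype(np.float32)
#     normed = zero_mean_unit_var(demo)
#     assert abs(normed.mean()) < 1e-3 and abs(normed.var() - 1) < 1e-3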
| 31
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __UpperCamelCase ( a__ ):
    images : Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.26.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version('''>=''', '''0.0.12''')
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __UpperCamelCase ( a__ ):
        images : np.ndarray
        nsfw_content_detected : List[bool]
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
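# --- Added illustrative sketch (simplified; the helper below is illustrative, not a
# diffusers API) ---
# The try/except blocks above implement soft optional-dependency gating: each heavy
# backend is imported only when installed, and dummy objects are exposed otherwise.
# The bare pattern looks like this:
#
#     try:
#         import torch
#     except ImportError:
#         torch = None
#
#     def require_torch():
#         # raise a helpful error only when the optional feature is actually used
#         if torch is None:
#             raise ImportError("Install `torch` to use this pipeline.")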
| 720
|
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize( example : dict ) ->dict:
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"] , truncation=False )["input_ids"]
    output["ratio_char_token"] = len(example["content"] ) / len(output["input_ids"] )
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
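# --- Added usage note (dataset/repo/script names below are placeholders) ---
# `ds.map(tokenize, num_proc=args.num_workers)` fans tokenization out across CPU
# processes; the stored `ratio_char_token` (characters per token) is a quick
# compression diagnostic for the tokenizer. Example invocation:
#
#     python pretokenizing.py --tokenizer_dir gpt2 \
#         --dataset_name <dataset> --tokenized_data_repo <repo>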
| 31
| 0
|
"""simple docstring"""
from statistics import mean, stdev
def normalization( data : list , ndigits : int = 3 ) ->list:
    '''simple docstring'''
    x_min = min(data )
    x_max = max(data )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data : list , ndigits : int = 3 ) ->list:
    '''simple docstring'''
    mu = mean(data )
    sigma = stdev(data )
    # standardize data
    return [round((x - mu) / (sigma) , ndigits ) for x in data]
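# --- Added worked examples ---
# normalization([2, 4, 6])   -> [0.0, 0.5, 1.0]    (min-max rescaling onto [0, 1])
# standardization([2, 4, 6]) -> [-1.0, 0.0, 1.0]   (sample stdev of [2, 4, 6] is 2.0)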
| 721
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mt5'''] = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mt5'''] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_mt5'''] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
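# --- Added illustrative sketch (simplified; `LazyModule` below is illustrative, not
# the real `_LazyModule` API) ---
# The lazy module above defers the heavy framework imports until an attribute is
# first accessed, roughly:
#
#     import importlib, types
#
#     class LazyModule(types.ModuleType):
#         def __init__(self, name, import_structure):
#             super().__init__(name)
#             # map each public name to the submodule that defines it
#             self._name_to_module = {n: m for m, ns in import_structure.items() for n in ns}
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
#             return getattr(module, attr)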
| 31
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutlmv2_fast'''] = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_layoutlmv2'''] = ['''LayoutLMv2FeatureExtractor''']
    _import_structure['''image_processing_layoutlmv2'''] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_layoutlmv2'''] = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmva import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMvaConfig
from .processing_layoutlmva import LayoutLMvaProcessor
from .tokenization_layoutlmva import LayoutLMvaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor, LayoutLMvaImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmva import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaLayer,
LayoutLMvaModel,
LayoutLMvaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( min_val : int = 10 , max_val : int = 1000 , option : bool = True ) ->int:
    '''simple docstring'''
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid values: min_val must be less than max_val" )
    return min_val if option else max_val
def get_avg( number_a : int , number_b : int ) ->int:
    '''simple docstring'''
    return int((number_a + number_b) / 2 )
def guess_the_number( lower : int , higher : int , to_guess : int ) ->None:
    '''simple docstring'''
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value for lower must be less than higher" )
    if not lower < to_guess < higher:
        raise ValueError(
            "guess value must be within the range of lower and higher value" )
    def answer(number : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
    last_lowest = lower
    last_highest = higher
    last_numbers : list[int] = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
else:
break
print(F"""guess the number : {last_numbers[-1]}""" )
print(F"""details : {last_numbers!s}""" )
def main( ) ->None:
    '''simple docstring'''
    lower = int(input("Enter lower value : " ).strip() )
    higher = int(input("Enter high value : " ).strip() )
    guess = int(input("Enter value to guess : " ).strip() )
    guess_the_number(lower , higher , guess )
if __name__ == "__main__":
main()
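# --- Added worked example (trace computed from the functions above) ---
# guess_the_number(1, 1000, 17) bisects the interval, probing
# 500, 250, 125, 63, 32, 16, 24, 20, 18, 17 -- ten guesses for a
# thousand-value range, i.e. O(log2(higher - lower)) steps.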
| 31
| 0
|
"""simple docstring"""
def solution( n : int = 400_0000 ) ->int:
    '''simple docstring'''
    even_fibs : list[int] = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F'''{solution() = }''')
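# --- Added alternative sketch (name `solution_even_only` is illustrative). It uses
# the identity E(n) = 4*E(n-1) + E(n-2): every third Fibonacci number is even, so the
# even terms can be generated directly, skipping two thirds of the iterations.
def solution_even_only(n: int = 400_0000) -> int:
    total, a, b = 0, 2, 8  # 2 and 8 are the first two even Fibonacci numbers
    while a <= n:
        total += a
        a, b = b, 4 * b + a
    return total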
| 701
|
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
a : Any = logging.get_logger(__name__)
a : Tuple = {
    '''tensor(bool)''': np.bool_,
    '''tensor(int8)''': np.int8,
    '''tensor(uint8)''': np.uint8,
    '''tensor(int16)''': np.int16,
    '''tensor(uint16)''': np.uint16,
    '''tensor(int32)''': np.int32,
    '''tensor(uint32)''': np.uint32,
    '''tensor(int64)''': np.int64,
    '''tensor(uint64)''': np.uint64,
    '''tensor(float16)''': np.float16,
    '''tensor(float)''': np.float32,
    '''tensor(double)''': np.float64,
}
class __UpperCamelCase :
    def __init__( self , model=None , **kwargs ) -> None:
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir" , None )
        self.latest_model_name = kwargs.get("latest_model_name" , ONNX_WEIGHTS_NAME )
    def __call__( self , **kwargs ) -> Dict:
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )
@staticmethod
    def load_model( path , provider=None , sess_options=None ):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )
    def _save_pretrained( self , save_directory , file_name = None , **kwargs ) -> None:
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass
        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass
    def save_pretrained( self , save_directory , **kwargs ) -> None:
        if os.path.isfile(save_directory ):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )
@classmethod
    def _from_pretrained( cls , model_id , use_auth_token = None , revision = None , force_download = False , cache_dir = None , file_name = None , provider = None , sess_options = None , **kwargs , ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs["model_save_dir"] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs["model_save_dir"] = Path(model_cache_path ).parent
            kwargs["latest_model_name"] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )
@classmethod
    def from_pretrained( cls , model_id , force_download = True , use_auth_token = None , cache_dir = None , **model_kwargs , ):
        revision = None
        if len(str(model_id ).split("@" ) ) == 2:
            model_id, revision = str(model_id ).split("@" )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
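# --- Added illustrative usage (paths and shapes below are placeholders) ---
#     import numpy as np
#     model = OnnxRuntimeModel.from_pretrained("path/to/onnx_dir", file_name="model.onnx")
#     outputs = model(input_ids=np.ones((1, 16), dtype=np.int64))  # kwargs become ONNX inputs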
| 31
| 0
|
"""simple docstring"""
a : Optional[Any] = [
'''Audio''',
'''Array2D''',
'''Array3D''',
'''Array4D''',
'''Array5D''',
'''ClassLabel''',
'''Features''',
'''Sequence''',
'''Value''',
'''Image''',
'''Translation''',
'''TranslationVariableLanguages''',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 702
|
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve( class_prompt : str , class_data_dir : str , num_class_images : int ) ->None:
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images )
    client = ClipClient(
        url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 )
    os.makedirs(F"""{class_data_dir}/images""" , exist_ok=True )
    if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt )
        if len(class_images ) >= factor * num_class_images or num_images > 1E4:
            break
        else:
            num_images = int(factor * num_images )
            client = ClipClient(
                url="https://knn.laion.ai/knn-service" , indice_name="laion_400m" , num_images=num_images , aesthetic_weight=0.1 , )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images" , total=num_class_images )
    with open(F"""{class_data_dir}/caption.txt""" , "w" ) as fa, open(F"""{class_data_dir}/urls.txt""" , "w" ) as fb, open(
        F"""{class_data_dir}/images.txt""" , "w" ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"] )
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content ) )
                    with open(F"""{class_data_dir}/images/{total}.jpg""" , "wb" ) as f:
                        f.write(img.content )
                    fa.write(images["caption"] + "\n" )
                    fb.write(images["url"] + "\n" )
                    fc.write(F"""{class_data_dir}/images/{total}.jpg""" + "\n" )
                    total += 1
                    pbar.update(1 )
                else:
                    continue
            except Exception:
                continue
    return
def parse_args( ) ->argparse.Namespace:
    '''simple docstring'''
    parser = argparse.ArgumentParser("" , add_help=False )
    parser.add_argument("--class_prompt" , help="text prompt to retrieve images" , required=True , type=str )
    parser.add_argument("--class_data_dir" , help="path to save images" , required=True , type=str )
    parser.add_argument("--num_class_images" , help="number of images to download" , default=200 , type=int )
    return parser.parse_args()
if __name__ == "__main__":
a : List[Any] = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 31
| 0
|
"""simple docstring"""
def encrypt( input_string : str , key : int ) ->str:
    '''simple docstring'''
    temp_grid : list[list[str]] = [[] for _ in range(key )]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1 or len(input_string ) <= key:
        return input_string
    for position, character in enumerate(input_string ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append(character )
    grid = ["".join(row ) for row in temp_grid]
    output_string = "".join(grid )
    return output_string
def decrypt( input_string : str , key : int ) ->str:
    '''simple docstring'''
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative" )
    if key == 1:
        return input_string
    temp_grid : list[list[str]] = [[] for _ in range(key )]  # generates template
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        temp_grid[num].append("*" )
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row )]
        grid.append(list(splice ) )
        counter += len(splice )
    output_string = ""  # reads as zigzag
    for position in range(len(input_string ) ):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num , lowest * 2 - num )  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0 )
    return output_string
def bruteforce( input_string : str ) ->dict[int, str]:
    '''simple docstring'''
    results = {}
    for key_guess in range(1 , len(input_string ) ):  # tries every key
        results[key_guess] = decrypt(input_string , key_guess )
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
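# --- Added worked example ---
# encrypt("WEAREDISCOVEREDFLEEATONCE", 3) lays the text on three rails in a zigzag:
#   W . . . E . . . C . . . R . . . L . . . T . . . E
#   . E . R . D . S . O . E . E . F . E . A . O . C .
#   . . A . . . I . . . V . . . D . . . E . . . N . .
# and reads them row by row: "WECRLTEERDSOEEFEAOCAIVDEN".
# decrypt() rebuilds the zigzag template with "*" markers, fills it from the
# ciphertext, and reads it back in zigzag order; bruteforce() tries every key.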
| 703
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
a : Optional[int] = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser( subparsers=None ) ->argparse.ArgumentParser:
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config" , description=_description )
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments" , "Arguments that can be configured through `accelerate config`." )
    config_args.add_argument(
        "--config_file" , type=str , default=None , help="Path to the config file to use for accelerate." , )
    config_args.add_argument(
        "--tpu_name" , default=None , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
    config_args.add_argument(
        "--tpu_zone" , default=None , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
    pod_args = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
    pod_args.add_argument(
        "--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
    pod_args.add_argument(
        "--command_file" , default=None , help="The path to the file containing the commands to run on the pod on startup." , )
    pod_args.add_argument(
        "--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
    pod_args.add_argument(
        "--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
    pod_args.add_argument(
        "--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
    pod_args.add_argument(
        "--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher( args ) ->None:
'''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
    if not args.command_file and defaults.command_file is not None and not args.command:
        args.command_file = defaults.command_file
    if not args.command and defaults.commands is not None:
        args.command = defaults.commands
    if not args.tpu_name:
        args.tpu_name = defaults.tpu_name
    if not args.tpu_zone:
        args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod." )
    if args.command_file:
        with open(args.command_file , "r" ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [F"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = "; ".join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F"""Running {' '.join(_lowercase )}""" )
return
    subprocess.run(cmd )
print("Successfully setup pod." )
def main( ) ->None:
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
| 31
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
a : str = parser.parse_args()
if args.model_type == "roberta":
a : int = RobertaForMaskedLM.from_pretrained(args.model_name)
a : Dict = '''roberta'''
elif args.model_type == "gpt2":
a : Tuple = GPTaLMHeadModel.from_pretrained(args.model_name)
a : List[str] = '''transformer'''
a : Dict = model.state_dict()
a : int = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
        compressed_sd[param_name] = state_dict[F'''{prefix}.{param_name}''']
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
        param_name = F'''{prefix}.embeddings.{w}.weight'''
        compressed_sd[param_name] = state_dict[param_name]
for w in ["weight", "bias"]:
        param_name = F'''{prefix}.embeddings.LayerNorm.{w}'''
        compressed_sd[param_name] = state_dict[param_name]
# Transformer Blocks #
a : List[str] = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
                compressed_sd[F'''{prefix}.h.{std_idx}.{layer}.{w}'''] = state_dict[
                    F'''{prefix}.h.{teacher_idx}.{layer}.{w}'''
                ]
            compressed_sd[F'''{prefix}.h.{std_idx}.attn.bias'''] = state_dict[F'''{prefix}.h.{teacher_idx}.attn.bias''']
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
                compressed_sd[F'''{prefix}.encoder.layer.{std_idx}.{layer}.{w}'''] = state_dict[
                    F'''{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}'''
                ]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
        compressed_sd[layer] = state_dict[F'''{layer}''']
if args.vocab_transform:
for w in ["weight", "bias"]:
            compressed_sd[F'''lm_head.dense.{w}'''] = state_dict[F'''lm_head.dense.{w}''']
            compressed_sd[F'''lm_head.layer_norm.{w}'''] = state_dict[F'''lm_head.layer_norm.{w}''']
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
        compressed_sd[F'''{prefix}.ln_f.{w}'''] = state_dict[F'''{prefix}.ln_f.{w}''']
    compressed_sd['''lm_head.weight'''] = state_dict['''lm_head.weight''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
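# --- Added illustrative follow-up (the 6-layer student config below is an assumption
# matching the six teacher layers selected above) ---
#     from transformers import RobertaConfig, RobertaForMaskedLM
#     student_config = RobertaConfig.from_pretrained("roberta-large", num_hidden_layers=6)
#     student = RobertaForMaskedLM(student_config)
#     student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)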
| 704
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
a : Tuple = None
a : int = logging.get_logger(__name__)
a : int = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : Optional[int] = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
a : int = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
a : List[Any] = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class __UpperCamelCase ( a__ ):
lowerCamelCase : Optional[Any] =VOCAB_FILES_NAMES
lowerCamelCase : str =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] =["""input_ids""", """attention_mask"""]
lowerCamelCase : Union[str, Any] =NllbTokenizer
lowerCamelCase : List[int] =[]
lowerCamelCase : List[int] =[]
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , src_lang=src_lang , tgt_lang=tgt_lang , additional_special_tokens=additional_special_tokens , legacy_behaviour=legacy_behaviour , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens] )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code ) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }
        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang )
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang )
@property
    def src_lang( self ) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang( self , new_src_lang ) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_b + self.suffix_tokens
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
    def _build_translation_inputs( self , raw_inputs , return_tensors , src_lang , tgt_lang , **extra_kwargs ) -> Any:
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
        self.src_lang = src_lang
        inputs = self(raw_inputs , add_special_tokens=True , return_tensors=return_tensors , **extra_kwargs )
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang )
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = "eng_Latn" , lowerCAmelCase__ = None , lowerCAmelCase__ = "fra_Latn" , **lowerCAmelCase__ , ) -> BatchEncoding:
a : Optional[int] = src_lang
a : int = tgt_lang
return super().prepare_seqaseq_batch(lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ )
    def _switch_to_input_mode( self ) -> None:
        return self.set_src_lang_special_tokens(self.src_lang )
    def _switch_to_target_mode( self ) -> None:
        return self.set_tgt_lang_special_tokens(self.tgt_lang )
    def set_src_lang_special_tokens( self , src_lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def set_tgt_lang_special_tokens( self , lang ) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang )
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens )
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens )
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
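# --- Added illustrative usage (checkpoint name taken from the vocab map above;
# exact token ids depend on the downloaded tokenizer) ---
#     from transformers import NllbTokenizerFast
#     tok = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     enc = tok("Hello world", return_tensors="pt")   # prefixed with the eng_Latn code
#     batch = tok.prepare_seq2seq_batch(["Hello world"], src_lang="eng_Latn",
#                                       tgt_lang="fra_Latn", return_tensors="pt")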
| 31
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
'''simple docstring'''
a : str = set()
# Replace all the whitespace in our sentence
a : str = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowercase ) == 26
def _SCREAMING_SNAKE_CASE ( _lowercase : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
'''simple docstring'''
a : Optional[Any] = [False] * 26
for char in input_str:
if char.islower():
a : Tuple = True
elif char.isupper():
a : Tuple = True
return all(_lowercase )
def _SCREAMING_SNAKE_CASE ( _lowercase : str = "The quick brown fox jumps over the lazy dog" , ) ->bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def benchmark( ) ->None:
'''simple docstring'''
from timeit import timeit
a : Dict = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=_lowercase ) )
print(timeit("is_pangram_faster()" , setup=_lowercase ) )
print(timeit("is_pangram_fastest()" , setup=_lowercase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 705
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : torch.FloatTensor
lowerCamelCase : torch.FloatTensor
lowerCamelCase : Optional[torch.FloatTensor] =None
class __UpperCamelCase ( a__ , a__ ):
lowerCamelCase : Tuple =2
@register_to_config
    def __init__( self , sigma_min : float = 0.02 , sigma_max : float = 100 , s_noise : float = 1.007 , s_churn : float = 80 , s_min : float = 0.05 , s_max : float = 50 , ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps : int = None
        self.timesteps : np.IntTensor = None
        self.schedule : torch.FloatTensor = None  # sigma(t_i)
    def __a ( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample
    def __a ( self , num_inference_steps , device = None ) -> None:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )
    def __a ( self , sample , sigma , generator = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def __a ( self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def __a ( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )
    def __a ( self , original_samples , noise , timesteps ) -> torch.FloatTensor:
raise NotImplementedError()
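# --- Added illustrative sketch (standalone mirror of the sigma schedule computed in
# the set_timesteps method above, using this scheduler's default config values).
def _karras_schedule_demo(num_steps: int, sigma_min: float = 0.02, sigma_max: float = 100.0) -> list:
    # geometric interpolation between sigma_max**2 and sigma_min**2, indexed by timestep
    timesteps = np.arange(0, num_steps)[::-1]
    return [sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_steps - 1)) for i in timesteps]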
| 31
| 0
|
"""simple docstring"""
def molarity_to_normality( nfactor : int , moles : float , volume : float ) ->float:
    '''simple docstring'''
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure( volume : float , moles : float , temperature : float ) ->float:
    '''simple docstring'''
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume( pressure : float , moles : float , temperature : float ) ->float:
    '''simple docstring'''
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature( pressure : float , moles : float , volume : float ) ->float:
    '''simple docstring'''
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
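    # Quick sanity check (editor-chosen example values, not from the original module):
    # 1 mol of an ideal gas at 300 K in 24.63 L should sit at about 1 atm, since
    # P = nRT / V = (1 * 0.0821 * 300) / 24.63 == 1.0.
    print(moles_to_pressure(volume=24.63, moles=1, temperature=300))  # -> 1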
| 706
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ['''names''', '''prefix''']
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ['''warn_bad_lines''', '''error_bad_lines''', '''mangle_dupe_cols''']
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ['''encoding_errors''', '''on_bad_lines''']
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ['''date_format''']
@dataclass
class CsvConfig( datasets.BuilderConfig ):
    """BuilderConfig for CSV."""

    sep : str =","
    delimiter : Optional[str] =None
    header : Optional[Union[int, List[int], str]] ="infer"
    names : Optional[List[str]] =None
    column_names : Optional[List[str]] =None
    index_col : Optional[Union[int, str, List[int], List[str]]] =None
    usecols : Optional[Union[List[int], List[str]]] =None
    prefix : Optional[str] =None
    mangle_dupe_cols : bool =True
    engine : Optional[Literal["c", "python", "pyarrow"]] =None
    converters : Dict[Union[int, str], Callable[[Any], Any]] =None
    true_values : Optional[list] =None
    false_values : Optional[list] =None
    skipinitialspace : bool =False
    skiprows : Optional[Union[int, List[int]]] =None
    nrows : Optional[int] =None
    na_values : Optional[Union[str, List[str]]] =None
    keep_default_na : bool =True
    na_filter : bool =True
    verbose : bool =False
    skip_blank_lines : bool =True
    thousands : Optional[str] =None
    decimal : str ="."
    lineterminator : Optional[str] =None
    quotechar : str ='"'
    quoting : int =0
    escapechar : Optional[str] =None
    comment : Optional[str] =None
    encoding : Optional[str] =None
    dialect : Optional[str] =None
    error_bad_lines : bool =True
    warn_bad_lines : bool =True
    skipfooter : int =0
    doublequote : bool =True
    memory_map : bool =False
    float_precision : Optional[str] =None
    chunksize : int =1_0000
    features : Optional[datasets.Features] =None
    encoding_errors : Optional[str] ="strict"
    on_bad_lines : Literal["error", "warn", "skip"] ="error"
    date_format : Optional[str] =None

    def __post_init__( self ) -> None:
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs( self ) -> Dict:
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , pd_read_csv_parameter ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv( datasets.ArrowBasedBuilder ):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info( self ) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(features=self.config.features )
    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={"files": files} ) )
        return splits
    def _cast_table( self , pa_table ) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature ) for feature in self.config.features.values() ):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=schema )
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table , schema )
        return pa_table
    def _generate_tables( self , files ):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature ) else object
                for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            csv_file_reader = pd.read_csv(file , iterator=True , dtype=dtype , **self.config.pd_read_csv_kwargs )
            try:
                for batch_idx, df in enumerate(csv_file_reader ):
                    pa_table = pa.Table.from_pandas(df )
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table )
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                raise
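# A minimal usage sketch (editor-added, hedged): assuming this module is the packaged
# "csv" builder that `datasets.load_dataset` dispatches to, and "data.csv" is a
# hypothetical local file, loading reduces to:
#
#   import datasets
#   ds = datasets.load_dataset("csv", data_files="data.csv")
#   print(ds["train"].column_names)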
| 31
| 0
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('''--user''', type=str, default='''ubuntu''')
parser.add_argument('''--host''', type=str, default='''localhost''')
parser.add_argument('''--key_path''', type=str, default=None)
parser.add_argument('''--instance''', type=str, default='''V100:1''')
parser.add_argument('''--provider''', type=str, default='''cheapest''')
parser.add_argument('''--use_spot''', type=bool, default=False)
parser.add_argument('''--example''', type=str, default='''pytorch/text-generation/run_generation.py''')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('''Cannot specify both BYO and on-demand cluster args''')
        cluster = rh.cluster(
name='''rh-cluster''', ips=[args.host], ssh_creds={'''ssh_user''': args.user, '''ssh_private_key''': args.key_path}
)
else:
        cluster = rh.cluster(
name='''rh-cluster''', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
)
    example_dir = args.example.rsplit('''/''', 1)[0]
# Set up remote environment
cluster.install_packages(['''pip:./''']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'''pip install -r transformers/examples/{example_dir}/requirements.txt'''])
cluster.run(['''pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'''])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'''python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'''])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
    # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16,
# stream_logs=True)
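    # Example invocation (editor-added, hypothetical host/key paths; assumes this file is
    # saved as run_on_remote.py):
    #   python run_on_remote.py --host 34.56.78.90 --user ubuntu --key_path ~/.ssh/id_rsa \
    #       pytorch/text-classification/run_glue.py --model_name_or_path bert-base-cased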
| 707
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
    def get_dummy_components( self ):
        return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components( self ):
        self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def test_save_load_float16( self ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )
    def test_attention_slicing_forward_pass( self ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
    def test_save_load_local( self ):
        self._test_save_load_local()
    def test_inference_batch_single_identical( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def test_xformers_attention_forwardGenerator_pass( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_all( self ):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.float16 )
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.float16 , text_encoder=None , tokenizer=None )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda" )
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle" , device="cuda" )
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # img2img
        pipe_1 = IFImgaImgPipeline(**pipe_1.components )
        pipe_2 = IFImgaImgSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_imgaimg(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()
        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components )
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components )
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor() )
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor() )
        self._test_if_inpainting(pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds )
    def _test_if( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , num_inference_steps=2 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , generator=generator , num_inference_steps=2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_imgaimg( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , num_inference_steps=2 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(image , expected_image )
    def _test_if_inpainting( self , pipe_1 , pipe_2 , prompt_embeds , negative_prompt_embeds ):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(torch_device )
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe_1(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , num_inference_steps=2 , generator=generator , output_type="np" , )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
        assert_mean_pixel_difference(image , expected_image )
        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(torch_device )
        original_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(torch_device )
        mask_image = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(torch_device )
        output = pipe_2(
            prompt_embeds=prompt_embeds , negative_prompt_embeds=negative_prompt_embeds , image=image , mask_image=mask_image , original_image=original_image , generator=generator , num_inference_steps=2 , output_type="np" , )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
        assert_mean_pixel_difference(image , expected_image )
def _SCREAMING_SNAKE_CASE ( ) ->List[str]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
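# A hedged usage sketch of the cascade these tests exercise (editor-added; requires a CUDA
# GPU, network access, and a token with access to the DeepFloyd weights, so shown as
# comments only):
#
#   pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
#   pipe_2 = IFSuperResolutionPipeline.from_pretrained(
#       "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
#   )
#   pipe_1.enable_model_cpu_offload()
#   pipe_2.enable_model_cpu_offload()
#   prompt_embeds, negative_embeds = pipe_1.encode_prompt("anime turtle")
#   image = pipe_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images
#   upscaled = pipe_2(image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds).images[0]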
| 31
| 0
|
"""simple docstring"""
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy( saved_model_path , strict , opset ) ->None:
    """Check that every op in the saved model is supported by the requested ONNX opset."""
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH , "utils" , "tf_ops" , "onnx.json" ) ) as f:
        onnx_opsets = json.load(f )["opsets"]

    for i in range(1 , opset + 1 ):
        onnx_ops.extend(onnx_opsets[str(i )] )

    with open(saved_model_path , "rb" ) as f:
        saved_model.ParseFromString(f.read() )

    model_op_names = set()
    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node )
        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def )

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names )
    incompatible_ops = []
    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op )

    if strict and len(incompatible_ops ) > 0:
        raise Exception(F"""Found the following incompatible ops for the opset {opset}:\n""" + "\n".join(incompatible_ops ) )
    elif len(incompatible_ops ) > 0:
        print(F"""Found the following incompatible ops for the opset {opset}:""" )
        print(*incompatible_ops , sep="\n" )
    else:
        print(F"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
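# Example invocation (editor-added, hypothetical paths; assumes this script lives at
# utils/check_tf_ops.py and is run from the repo root so utils/tf_ops/onnx.json resolves):
#   python utils/check_tf_ops.py --saved_model_path saved_model/saved_model.pb --opset 12 --strict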
| 708
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __UpperCamelCase ( unittest.TestCase ):
    def test_all_is_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_diffusers_model_is_compatible( self ):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_diffusers_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def test_transformer_model_is_compatible( self ):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_transformer_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )
    def test_all_is_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_compatible_variant_partial( self ):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
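# A hedged sketch (editor-added illustration, NOT the library's real implementation) of the
# rule the tests above encode: every torch (".bin") weight needs a safetensors sibling for
# the same component, with the ".<variant>" infix treated as optional.
def sketch_is_safetensors_compatible(filenames, variant=None):
    def component_key(name):
        # strip the weight-file extension
        for suffix in (".bin", ".safetensors"):
            if name.endswith(suffix):
                name = name[: -len(suffix)]
        # strip an optional variant infix such as ".fp16"
        if variant is not None and name.endswith(f".{variant}"):
            name = name[: -len(f".{variant}")]
        # torch and safetensors checkpoints use different base file names
        return name.replace("diffusion_pytorch_model", "model").replace("pytorch_model", "model")

    torch_keys = {component_key(f) for f in filenames if f.endswith(".bin")}
    safetensors_keys = {component_key(f) for f in filenames if f.endswith(".safetensors")}
    return torch_keys.issubset(safetensors_keys)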
| 31
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler( SchedulerMixin , ConfigMixin ):
    order = 2

    @register_to_config
    def __init__( self , sigma_min = 0.02 , sigma_max = 100 , s_noise = 1.007 , s_churn = 80 , s_min = 0.05 , s_max = 50 , ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input( self , sample , timestep = None ) -> torch.FloatTensor:
        return sample

    def set_timesteps( self , num_inference_steps , device = None ) -> None:
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )

    def add_noise_to_input( self , sample , sigma , generator = None ) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step( self , model_output , sigma_hat , sigma_prev , sample_hat , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct( self , model_output , sigma_hat , sigma_prev , sample_hat , sample_prev , derivative , return_dict = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise( self , original_samples , noise , timesteps ) -> torch.FloatTensor:
        raise NotImplementedError()
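# A hedged usage sketch (editor-added; shown as comments because the module cannot be run
# directly due to its relative imports, and the denoising model is stubbed with zeros):
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(10)
#   sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
#   for i in range(len(scheduler.schedule)):
#       sigma = scheduler.schedule[i]
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0.0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = torch.zeros_like(sample_hat)  # stand-in for a trained UNet
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample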
| 709
|
"""simple docstring"""
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D( nn.Module ):
    out_channels : int
    dtype : jnp.dtype =jnp.float32

    def setup( self ) -> None:
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self , hidden_states ):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states , shape=(batch, height * 2, width * 2, channels) , method="nearest" , )
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxDownsample2D( nn.Module ):
    out_channels : int
    dtype : jnp.dtype =jnp.float32

    def setup( self ) -> None:
        self.conv = nn.Conv(
            self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

    def __call__( self , hidden_states ):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states )
        return hidden_states
class FlaxResnetBlock2D( nn.Module ):
    in_channels : int
    out_channels : int =None
    dropout_prob : float =0.0
    use_nin_shortcut : bool =None
    dtype : jnp.dtype =jnp.float32

    def setup( self ) -> None:
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.conv1 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        self.time_emb_proj = nn.Dense(out_channels , dtype=self.dtype )
        self.norm2 = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
        self.dropout = nn.Dropout(self.dropout_prob )
        self.conv2 = nn.Conv(
            out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels , kernel_size=(1, 1) , strides=(1, 1) , padding="VALID" , dtype=self.dtype , )

    def __call__( self , hidden_states , temb , deterministic=True ):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.conv1(hidden_states )
        temb = self.time_emb_proj(nn.swish(temb ) )
        temb = jnp.expand_dims(jnp.expand_dims(temb , 1 ) , 1 )
        hidden_states = hidden_states + temb
        hidden_states = self.norm2(hidden_states )
        hidden_states = nn.swish(hidden_states )
        hidden_states = self.dropout(hidden_states , deterministic )
        hidden_states = self.conv2(hidden_states )
        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual )
        return hidden_states + residual
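# A hedged init example (editor-added; shapes are illustrative only, NHWC layout as used
# by __call__ above):
#
#   block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
#   rng = jax.random.PRNGKey(0)
#   hidden = jnp.zeros((1, 16, 16, 32))
#   temb = jnp.zeros((1, 128))
#   params = block.init(rng, hidden, temb)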
| 31
| 0
|
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
'''approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports'''.split()
)
def get_subreddit_data( subreddit: str , limit: int = 1 , age: str = "new" , wanted_data: list | None = None ) ->dict:
    """Fetch posts from a subreddit, optionally restricting the returned fields to wanted_data."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data ) - valid_terms ) ):
        msg = f"""Invalid search term: {invalid_search_terms}"""
        raise ValueError(msg )
    response = requests.get(
        f"""https://reddit.com/r/{subreddit}/{age}.json?limit={limit}""" , headers={"User-agent": "A random string"} , )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit )}

    data_dict = {}
    for id_ in range(limit ):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data('''learnpython''', wanted_data=['''title''', '''url''', '''selftext''']))
| 710
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    """Build the argument parser for the `accelerate test` command."""
    if subparsers is not None:
        parser = subparsers.add_parser("test" )
    else:
        parser = argparse.ArgumentParser("Accelerate test command" )

    parser.add_argument(
        "--config_file" , default=None , help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ) , )

    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser


def test_command( args ):
    """Launch the bundled test script to validate the distributed setup."""
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!" )


def main():
    """Entry point when the module is executed directly."""
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
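    # Equivalent CLI entry point (this module backs the `accelerate test` subcommand):
    #   accelerate test --config_file path/to/default_config.yaml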
| 31
| 0
|
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
a : str = '''1'''
a : List[str] = '''0'''
a : str = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
sess = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 2000
results = {}
for iter in range(max_iters):
    results = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 711
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314_4598
def rms_speed_of_molecule( temperature: float , molar_mass: float ) ->float:
    """Root-mean-square speed of a gas molecule: v_rms = sqrt(3RT / M)."""
if temperature < 0:
raise Exception("Temperature cannot be less than 0 K" )
if molar_mass <= 0:
raise Exception("Molar mass cannot be less than or equal to 0 kg/mol" )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
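    # Units caveat (editor-added assumption about the intended units): the formula expects
    # molar mass in kg/mol. For nitrogen that is 0.028 kg/mol, giving about 517 m/s at
    # 300 K; passing 28 (g/mol) as above understates v_rms by a factor of sqrt(1000) ~ 31.6.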
| 31
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[Any] = logging.get_logger(__name__)
a : Dict = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig( PretrainedConfig ):
    model_type ="""funnel"""
    attribute_map ={
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
    }
    def __init__( self , vocab_size=3_0522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1E-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs , ) -> None:
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs )
    @property
    def num_hidden_layers( self ) -> int:
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers( self , value ) -> None:
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )

    @property
    def num_blocks( self ) -> int:
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks( self , value ) -> None:
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
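# A hedged usage sketch (editor-added; the class above corresponds to transformers'
# FunnelConfig, so the import path below is an assumption about how it is exposed):
#
#   from transformers import FunnelConfig, FunnelModel
#   config = FunnelConfig(block_sizes=[4, 4, 4], n_head=12, d_model=768)
#   model = FunnelModel(config)          # randomly initialised
#   print(config.num_hidden_layers)      # sum(block_sizes) == 12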
| 712
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    def _get_tensors( self , length ):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores

    def test_list_criteria( self ):
        input_ids, scores = self._get_tensors(5 )

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_max_length_criteria( self ):
        criteria = MaxLengthCriteria(max_length=10 )

        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_max_new_tokens_criteria( self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )

        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )

        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )

        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )

    def test_max_time_criteria( self ):
        input_ids, scores = self._get_tensors(5 )

        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )

        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )

    def test_validate_stopping_criteria( self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )

        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
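# A hedged usage sketch of the criteria under test (editor-added; model/tokenizer names
# are illustrative, shown as comments since they require downloaded weights):
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("gpt2")
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   ids = tok("Hello", return_tensors="pt").input_ids
#   out = model.generate(ids, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)]))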
| 31
| 0
|
"""simple docstring"""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '''▁'''

VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
MAX_MODEL_INPUT_SIZES = {
'''facebook/s2t-small-librispeech-asr''': 1024,
}
MUSTC_LANGS = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']

LANGUAGES = {'''mustc''': MUSTC_LANGS}
class Speech2TextTokenizer( PreTrainedTokenizer ):
    vocab_files_names =VOCAB_FILES_NAMES
    pretrained_vocab_files_map =PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes =MAX_MODEL_INPUT_SIZES
    model_input_names =["""input_ids""", """attention_mask"""]

    prefix_tokens: List[int] =[]
    def __init__( self , vocab_file , spm_file , bos_token="<s>" , eos_token="</s>" , pad_token="<pad>" , unk_token="<unk>" , do_upper_case=False , do_lower_case=False , tgt_lang=None , lang_codes=None , sp_model_kwargs = None , **kwargs , ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , do_upper_case=do_upper_case , do_lower_case=do_lower_case , tgt_lang=tgt_lang , lang_codes=lang_codes , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file , self.sp_model_kwargs )

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f"""<lang:{lang}>""" for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]

            self.set_tgt_lang_special_tokens(self._tgt_lang )
        else:
            self.lang_code_to_id = {}
    @property
    def vocab_size( self ) -> int:
        return len(self.encoder )

    @property
    def tgt_lang( self ) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang( self , new_tgt_lang ) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang )

    def set_tgt_lang_special_tokens( self , tgt_lang ) -> None:
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]
    def _tokenize( self , text ) -> List[str]:
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder[self.unk_token] )

    def _convert_id_to_token( self , index ) -> str:
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string( self , tokens ) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens )
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        decoded = self.sp_model.decode(current_sub_tokens )
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask( self , token_ids_0 , token_ids_1 = None , already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens )
        suffix_ones = [1]
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0 )) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0 )) + ([0] * len(token_ids_1 )) + suffix_ones
    def get_vocab( self ) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        save_dir = Path(save_directory )
        assert save_dir.is_dir(), f"""{save_directory} should be a directory"""
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder , vocab_save_path )

        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )

        return (str(vocab_save_path ), str(spm_save_path ))
def load_spm( path: str , sp_model_kwargs: Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from disk."""
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm


def load_json( path: str ) -> Union[Dict, List]:
    """Load a JSON file."""
    with open(path , "r" ) as f:
        return json.load(f )


def save_json( data , path: str ) -> None:
    """Save data as an indented JSON file."""
    with open(path , "w" ) as f:
        json.dump(data , f , indent=2 )
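# A hedged usage sketch (editor-added; this module corresponds to transformers'
# Speech2TextTokenizer, so the import path and checkpoint name below are assumptions
# shown as comments since they require downloaded files):
#
#   from transformers import Speech2TextTokenizer
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   print(tok("hello world").input_ids)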
| 713
|
"""simple docstring"""
def solution( pence: int = 200 ) ->int:
    """Count the ways to make `pence` from British coins (Project Euler 31)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
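# Worked example (editor-added): solution(5) == 4, since only coins {1, 2, 5} fit and the
# four combinations are {5}, {2, 2, 1}, {2, 1, 1, 1}, and {1, 1, 1, 1, 1}.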
if __name__ == "__main__":
assert solution(200) == 73682
| 31
| 0
|
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __UpperCamelCase :
    @property
    def dummy_input( self ):
        return self.get_dummy_input()

    @property
    def output_shape( self ):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
    def get_dummy_input( self , include_temb=True , include_res_hidden_states_tuple=False , include_encoder_hidden_states=False , include_skip_sample=False , ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0 )
        device = torch.device(torch_device )
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape , generator=generator , device=device )
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels) , generator=generator , device=device )

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1 )
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape , generator=generator_1 , device=device ),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32) ).to(torch_device )

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes) , generator=generator , device=device )

        return dummy_input
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels" )

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_output( self , expected_slice ):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict )
        unet_block.to(torch_device )
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict )

        if isinstance(output , Tuple ):
            output = output[0]

        self.assertEqual(output.shape , self.output_shape )

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice ).to(torch_device )
        assert torch_all_close(output_slice.flatten() , expected_slice , atol=5E-3 )
@unittest.skipIf(torch_device == "mps" , "Training is not supported in mps" )
    def test_training( self ):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict )
        model.to(torch_device )
        model.train()
        output = model(**inputs_dict )

        if isinstance(output , Tuple ):
            output = output[0]

        device = torch.device(torch_device )
        noise = randn_tensor(output.shape , device=device )
        loss = torch.nn.functional.mse_loss(output , noise )
        loss.backward()
| 714
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __UpperCamelCase ( metaclass=a__ ):
lowerCamelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""]
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Any:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def __a ( cls , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> int:
requires_backends(cls , ["transformers", "torch", "note_seq"] )
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int) -> set[int]:
    '''simple docstring'''
    if number_to_partition < 0:
        return set()
    elif number_to_partition == 0:
        return {1}
    ret: set[int] = set()
    prime: int
    sub: int
    for prime in primes:
        if prime > number_to_partition:
            continue
        for sub in partition(number_to_partition - prime):
            ret.add(sub * prime)
    return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    '''simple docstring'''
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
    return None
if __name__ == "__main__":
print(F'''{solution() = }''')
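    # Worked check (an addition): each prime partition of n maps to a unique
    # product of primes, so the set size is the partition count; for n = 10 the
    # five ways are 2+2+3+3, 2+3+5, 3+7, 5+5, and 2+2+2+2+2.
    assert len(partition(10)) == 5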
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    '''simple docstring'''
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F'''Total count for various states are: {single_qubit_measure(1, 1)}''')
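    # With a single qubit, no gates, and 1000 shots, the histogram printed
    # above is deterministic: measuring |0> always yields {'0': 1000}.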
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start, goal):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node, bwd_node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print('''Unidirectional BFS computation time : ''', bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
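
    # Note (an addition): both searches return a path from (0, 0) to the
    # bottom-right corner; bidirectional BFS expands frontiers from both ends
    # and usually visits fewer nodes, which the two timings above contrast.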
"""simple docstring"""
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    '''simple docstring'''
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    '''simple docstring'''
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('''The array is''')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'''No of Comparisons for 100 elements selected from a standard normal distribution'''
'''is :'''
)
print(z)
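
# Follow-up check (an addition): after the in-place quick sort above, the
# loaded array M must be in non-decreasing order.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1)), "M is not sorted"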
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __UpperCamelCase ( a__ , a__ , unittest.TestCase ):
lowerCamelCase : List[str] =VQModel
lowerCamelCase : List[str] ="""sample"""
@property
def __a ( self , lowerCAmelCase__=(32, 32) ) -> Any:
a : Dict = 4
a : str = 3
a : str = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCAmelCase__ )
return {"sample": image}
@property
def __a ( self ) -> Union[str, Any]:
return (3, 32, 32)
@property
def __a ( self ) -> int:
return (3, 32, 32)
def __a ( self ) -> int:
a : str = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 3,
}
a : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __a ( self ) -> List[Any]:
pass
def __a ( self ) -> Union[str, Any]:
pass
def __a ( self ) -> int:
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
def __a ( self ) -> int:
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
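

# Optional direct entry point (an addition; not needed under pytest): lets the
# module be executed standalone.
if __name__ == "__main__":
    unittest.main()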
"""simple docstring"""
import base64


def ascii85_encode(string: str) -> bytes:
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    '''simple docstring'''
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
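
    # Round-trip sketch (an addition): decoding the encoding must reproduce
    # the original text.
    assert ascii85_decode(ascii85_encode("Hello World!")) == "Hello World!"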
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __UpperCamelCase ( a__ ):
lowerCamelCase : torch.FloatTensor
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=("DownEncoderBlock2D",) , lowerCAmelCase__=(64,) , lowerCAmelCase__=2 , lowerCAmelCase__=32 , lowerCAmelCase__="silu" , lowerCAmelCase__=True , ) -> Dict:
super().__init__()
a : Tuple = layers_per_block
a : List[str] = torch.nn.Convad(
lowerCAmelCase__ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
a : str = None
a : Any = nn.ModuleList([] )
# down
a : Tuple = block_out_channels[0]
for i, down_block_type in enumerate(lowerCAmelCase__ ):
a : Dict = output_channel
a : str = block_out_channels[i]
a : int = i == len(lowerCAmelCase__ ) - 1
a : List[Any] = get_down_block(
lowerCAmelCase__ , num_layers=self.layers_per_block , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , add_downsample=not is_final_block , resnet_eps=1E-6 , downsample_padding=0 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
self.down_blocks.append(lowerCAmelCase__ )
# mid
a : str = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
# out
a : Tuple = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=lowerCAmelCase__ , eps=1E-6 )
a : Dict = nn.SiLU()
a : Union[str, Any] = 2 * out_channels if double_z else out_channels
a : List[Any] = nn.Convad(block_out_channels[-1] , lowerCAmelCase__ , 3 , padding=1 )
a : List[str] = False
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
a : Optional[Any] = x
a : Dict = self.conv_in(lowerCAmelCase__ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase__ ):
def custom_forward(*lowerCAmelCase__ ):
return module(*lowerCAmelCase__ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
a : List[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
# middle
a : List[str] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
else:
for down_block in self.down_blocks:
a : List[Any] = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ )
# middle
a : Optional[int] = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , lowerCAmelCase__ )
else:
# down
for down_block in self.down_blocks:
a : Optional[int] = down_block(lowerCAmelCase__ )
# middle
a : int = self.mid_block(lowerCAmelCase__ )
# post-process
a : Tuple = self.conv_norm_out(lowerCAmelCase__ )
a : int = self.conv_act(lowerCAmelCase__ )
a : str = self.conv_out(lowerCAmelCase__ )
return sample
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=3 , lowerCAmelCase__=("UpDecoderBlock2D",) , lowerCAmelCase__=(64,) , lowerCAmelCase__=2 , lowerCAmelCase__=32 , lowerCAmelCase__="silu" , lowerCAmelCase__="group" , ) -> Dict:
super().__init__()
a : List[str] = layers_per_block
a : Optional[int] = nn.Convad(
lowerCAmelCase__ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
a : int = None
a : Union[str, Any] = nn.ModuleList([] )
a : Optional[int] = in_channels if norm_type == "spatial" else None
# mid
a : Any = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , )
# up
a : List[str] = list(reversed(lowerCAmelCase__ ) )
a : List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(lowerCAmelCase__ ):
a : str = output_channel
a : Tuple = reversed_block_out_channels[i]
a : Any = i == len(lowerCAmelCase__ ) - 1
a : str = get_up_block(
lowerCAmelCase__ , num_layers=self.layers_per_block + 1 , in_channels=lowerCAmelCase__ , out_channels=lowerCAmelCase__ , prev_output_channel=lowerCAmelCase__ , add_upsample=not is_final_block , resnet_eps=1E-6 , resnet_act_fn=lowerCAmelCase__ , resnet_groups=lowerCAmelCase__ , attention_head_dim=lowerCAmelCase__ , temb_channels=lowerCAmelCase__ , resnet_time_scale_shift=lowerCAmelCase__ , )
self.up_blocks.append(lowerCAmelCase__ )
a : List[str] = output_channel
# out
if norm_type == "spatial":
a : Optional[Any] = SpatialNorm(block_out_channels[0] , lowerCAmelCase__ )
else:
a : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=lowerCAmelCase__ , eps=1E-6 )
a : int = nn.SiLU()
a : Union[str, Any] = nn.Convad(block_out_channels[0] , lowerCAmelCase__ , 3 , padding=1 )
a : int = False
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=None ) -> Any:
a : Any = z
a : Union[str, Any] = self.conv_in(lowerCAmelCase__ )
a : Any = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(lowerCAmelCase__ ):
def custom_forward(*lowerCAmelCase__ ):
return module(*lowerCAmelCase__ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
a : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
a : List[Any] = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
a : str = torch.utils.checkpoint.checkpoint(
create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ , use_reentrant=lowerCAmelCase__ )
else:
# middle
a : Optional[Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , lowerCAmelCase__ , lowerCAmelCase__ )
a : int = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
a : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(lowerCAmelCase__ ) , lowerCAmelCase__ , lowerCAmelCase__ )
else:
# middle
a : List[Any] = self.mid_block(lowerCAmelCase__ , lowerCAmelCase__ )
a : int = sample.to(lowerCAmelCase__ )
# up
for up_block in self.up_blocks:
a : Optional[Any] = up_block(lowerCAmelCase__ , lowerCAmelCase__ )
# post-process
if latent_embeds is None:
a : str = self.conv_norm_out(lowerCAmelCase__ )
else:
a : Optional[int] = self.conv_norm_out(lowerCAmelCase__ , lowerCAmelCase__ )
a : Any = self.conv_act(lowerCAmelCase__ )
a : Tuple = self.conv_out(lowerCAmelCase__ )
return sample
class __UpperCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__="random" , lowerCAmelCase__=False , lowerCAmelCase__=True ) -> List[Any]:
super().__init__()
a : Union[str, Any] = n_e
a : Optional[Any] = vq_embed_dim
a : int = beta
a : Any = legacy
a : str = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
a : str = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
a : List[Any] = self.used.shape[0]
a : Optional[int] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
a : List[str] = self.re_embed
a : List[str] = self.re_embed + 1
print(
f"""Remapping {self.n_e} indices to {self.re_embed} indices. """
f"""Using {self.unknown_index} for unknown indices.""" )
else:
a : str = n_e
a : Union[str, Any] = sane_index_shape
def __a ( self , lowerCAmelCase__ ) -> str:
a : List[Any] = inds.shape
assert len(lowerCAmelCase__ ) > 1
a : Union[str, Any] = inds.reshape(ishape[0] , -1 )
a : Optional[Any] = self.used.to(lowerCAmelCase__ )
a : List[str] = (inds[:, :, None] == used[None, None, ...]).long()
a : str = match.argmax(-1 )
a : int = match.sum(2 ) < 1
if self.unknown_index == "random":
a : Optional[int] = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
a : Optional[Any] = self.unknown_index
return new.reshape(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> int:
a : str = inds.shape
assert len(lowerCAmelCase__ ) > 1
a : Tuple = inds.reshape(ishape[0] , -1 )
a : Dict = self.used.to(lowerCAmelCase__ )
if self.re_embed > self.used.shape[0]: # extra token
a : Optional[Any] = 0 # simply set to zero
a : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , lowerCAmelCase__ )
return back.reshape(lowerCAmelCase__ )
def __a ( self , lowerCAmelCase__ ) -> int:
# reshape z -> (batch, height, width, channel) and flatten
a : Tuple = z.permute(0 , 2 , 3 , 1 ).contiguous()
a : Optional[int] = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
a : Tuple = torch.argmin(torch.cdist(lowerCAmelCase__ , self.embedding.weight ) , dim=1 )
a : Dict = self.embedding(lowerCAmelCase__ ).view(z.shape )
a : Dict = None
a : Tuple = None
# compute loss for embedding
if not self.legacy:
a : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
a : Union[str, Any] = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
a : Optional[int] = z + (z_q - z).detach()
# reshape back to match original input shape
a : Dict = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
a : Optional[Any] = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
a : Optional[Any] = self.remap_to_used(lowerCAmelCase__ )
a : List[Any] = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
a : List[Any] = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> str:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
a : List[str] = indices.reshape(shape[0] , -1 ) # add batch axis
a : Optional[Any] = self.unmap_to_all(lowerCAmelCase__ )
a : Any = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
a : str = self.embedding(lowerCAmelCase__ )
if shape is not None:
a : List[str] = z_q.view(lowerCAmelCase__ )
# reshape back to match original input shape
a : Optional[int] = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=False ) -> int:
a : Any = parameters
a : Tuple = torch.chunk(lowerCAmelCase__ , 2 , dim=1 )
a : Optional[Any] = torch.clamp(self.logvar , -30.0 , 20.0 )
a : int = deterministic
a : Optional[Any] = torch.exp(0.5 * self.logvar )
a : Optional[int] = torch.exp(self.logvar )
if self.deterministic:
a : int = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def __a ( self , lowerCAmelCase__ = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
a : List[str] = randn_tensor(
self.mean.shape , generator=lowerCAmelCase__ , device=self.parameters.device , dtype=self.parameters.dtype )
a : List[str] = self.mean + self.std * sample
return x
def __a ( self , lowerCAmelCase__=None ) -> Tuple:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__=[1, 2, 3] ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
a : List[str] = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=lowerCAmelCase__ )
def __a ( self ) -> Any:
return self.mean
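

# Conceptual sketch (an addition, standalone with toy shapes) of the
# reparameterization trick the diagonal Gaussian posterior above implements:
# split the predicted channels into mean/log-variance halves, then sample as
# mean + std * eps so gradients can flow through the sampling step.
if __name__ == "__main__":
    parameters = torch.randn(2, 8, 16, 16)            # [mean | logvar] stacked on dim 1
    mean, logvar = torch.chunk(parameters, 2, dim=1)  # two (2, 4, 16, 16) halves
    std = torch.exp(0.5 * torch.clamp(logvar, -30.0, 20.0))
    z = mean + std * torch.randn_like(mean)           # differentiable sample
    print(z.shape)                                    # torch.Size([2, 4, 16, 16])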
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a : Tuple = None
a : Any = logging.get_logger(__name__)
a : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : str = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
a : str = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
a : Union[str, Any] = '''▁'''
class __UpperCamelCase ( a__ ):
lowerCamelCase : Union[str, Any] =VOCAB_FILES_NAMES
lowerCamelCase : Dict =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : List[Any] =AlbertTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="[SEP]" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="[CLS]" , lowerCAmelCase__="[MASK]" , **lowerCAmelCase__ , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
a : Optional[int] = (
AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ , normalized=lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
else mask_token
)
super().__init__(
lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , **lowerCAmelCase__ , )
a : Dict = do_lower_case
a : Any = remove_space
a : Optional[Any] = keep_accents
a : List[str] = vocab_file
a : Optional[Any] = False if not self.vocab_file else True
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
a : Optional[Any] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
a : Dict = os.path.join(
lowerCAmelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
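

# Illustration (an addition, with made-up token ids) of the special-token
# layout built by the two methods above: a single sequence becomes
# [CLS] A [SEP]; a pair becomes [CLS] A [SEP] B [SEP], with token type ids 0
# for the first segment and 1 for the second.
if __name__ == "__main__":
    cls, sep = [2], [3]                     # hypothetical id values
    a_ids, b_ids = [10, 11], [20, 21]
    print(cls + a_ids + sep)                # single sequence
    print(cls + a_ids + sep + b_ids + sep)  # sequence pair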
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    '''simple docstring'''
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    '''simple docstring'''
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
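    # Quick check of the incremental sieve above (an addition): its first
    # outputs must be the primes in order.
    gen = sieve()
    assert [next(gen) for _ in range(5)] == [2, 3, 5, 7, 11]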
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
a : List[Any] = random.Random()
def _SCREAMING_SNAKE_CASE ( _lowercase : List[str] , _lowercase : int=1.0 , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None ) ->Optional[Any]:
'''simple docstring'''
if rng is None:
a : Tuple = global_rng
a : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=7 , lowerCAmelCase__=400 , lowerCAmelCase__=2000 , lowerCAmelCase__=1 , lowerCAmelCase__=0.0 , lowerCAmelCase__=1_6000 , lowerCAmelCase__=True , lowerCAmelCase__=80 , lowerCAmelCase__=16 , lowerCAmelCase__=64 , lowerCAmelCase__="hann_window" , lowerCAmelCase__=80 , lowerCAmelCase__=7600 , lowerCAmelCase__=1E-10 , lowerCAmelCase__=True , ) -> Optional[Any]:
a : int = parent
a : Tuple = batch_size
a : Dict = min_seq_length
a : Any = max_seq_length
a : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a : Union[str, Any] = feature_size
a : Tuple = padding_value
a : str = sampling_rate
a : Dict = do_normalize
a : str = num_mel_bins
a : List[str] = hop_length
a : str = win_length
a : Optional[Any] = win_function
a : List[str] = fmin
a : Any = fmax
a : Optional[int] = mel_floor
a : Tuple = return_attention_mask
def __a ( self ) -> Optional[Any]:
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Tuple:
def _flatten(lowerCAmelCase__ ):
return list(itertools.chain(*lowerCAmelCase__ ) )
if equal_length:
a : Union[str, Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
a : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Any = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
def __a ( self , lowerCAmelCase__=False , lowerCAmelCase__=False ) -> Dict:
if equal_length:
a : str = [floats_list((self.max_seq_length, self.num_mel_bins) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a : Any = [
floats_list((x, self.num_mel_bins) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a : Optional[int] = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
class __UpperCamelCase ( a__ , unittest.TestCase ):
lowerCamelCase : Tuple =SpeechTaFeatureExtractor
def __a ( self ) -> Union[str, Any]:
a : Tuple = SpeechTaFeatureExtractionTester(self )
def __a ( self , lowerCAmelCase__ ) -> Union[str, Any]:
self.assertTrue(np.all(np.mean(lowerCAmelCase__ , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ , axis=0 ) - 1 ) < 1E-3 ) )
def __a ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Any = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Any = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test not batched input
a : Optional[int] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
a : Optional[Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
a : int = feat_extract(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> Optional[Any]:
a : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Dict = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = ["longest", "max_length", "do_not_pad"]
a : Tuple = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Dict = feat_extract(lowerCAmelCase__ , padding=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> str:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : List[str] = range(800 , 1400 , 200 )
a : List[str] = [floats_list((1, x) )[0] for x in lengths]
a : Any = ["longest", "max_length", "do_not_pad"]
a : Any = [None, 1600, None]
for max_length, padding in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
a : List[Any] = feat_extract(lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding=lowerCAmelCase__ )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
def __a ( self ) -> Dict:
a : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Union[str, Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="max_length" , return_tensors="np" )
a : List[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def __a ( self ) -> Dict:
a : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : List[Any] = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=1000 , padding="longest" , return_tensors="np" )
a : Union[str, Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a : List[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : int = feat_extract(
lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=2000 , padding="longest" , return_tensors="np" )
a : Dict = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
def __a ( self ) -> List[str]:
a : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a : Any = np.random.rand(100 ).astype(np.floataa )
a : Optional[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
a : str = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
a : List[str] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def __a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
a : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a : Tuple = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs]
# Test feature size
a : Union[str, Any] = feature_extractor(audio_target=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors="np" ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a : Dict = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_values
a : List[Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test batched
a : Optional[int] = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : Any = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a : List[Any] = np.asarray(lowerCAmelCase__ )
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def __a ( self ) -> str:
a : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Any = self.feature_extraction_class(**self.feat_extract_dict )
a : Union[str, Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCAmelCase__ ) == len(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , processed_features[input_name] ) ) )
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : Dict = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Tuple:
a : Tuple = self.feat_extract_tester.prepare_inputs_for_target(equal_length=lowerCAmelCase__ )
a : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
a : Tuple = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a : List[str] = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
def __a ( self ) -> Optional[Any]:
a : Dict = self.feature_extraction_class(**self.feat_extract_dict )
a : Optional[int] = self.feat_extract_tester.prepare_inputs_for_target()
a : Optional[Any] = feat_extract.model_input_names[0]
a : List[str] = BatchFeature({input_name: speech_inputs} )
a : Tuple = feat_extract.num_mel_bins # hack!
a : List[Any] = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )[input_name]
a : Any = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
def __a ( self ) -> Union[str, Any]:
a : Any = self.feat_extract_dict
a : Optional[Any] = True
a : Union[str, Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : Any = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : int = feat_extract.model_input_names[0]
a : List[Any] = BatchFeature({input_name: speech_inputs} )
a : Union[str, Any] = feat_extract.num_mel_bins # hack!
a : Dict = feat_extract.pad(lowerCAmelCase__ , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCAmelCase__ )
def __a ( self ) -> Union[str, Any]:
a : Tuple = self.feat_extract_dict
a : str = True
a : Optional[Any] = self.feature_extraction_class(**lowerCAmelCase__ )
a : List[Any] = self.feat_extract_tester.prepare_inputs_for_target()
a : Dict = [len(lowerCAmelCase__ ) for x in speech_inputs]
a : Optional[Any] = feat_extract.model_input_names[0]
a : str = BatchFeature({input_name: speech_inputs} )
a : Optional[Any] = min(lowerCAmelCase__ )
a : List[Any] = feat_extract.num_mel_bins # hack!
a : Any = feat_extract.pad(
lowerCAmelCase__ , padding="max_length" , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors="np" )
self.assertIn("attention_mask" , lowerCAmelCase__ )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
from datasets import load_dataset
a : Tuple = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a : Optional[Any] = ds.sort("id" ).select(range(lowerCAmelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : List[Any] = torch.tensor(
[2.3_804E-03, 2.0_752E-03, 1.9_836E-03, 2.1_057E-03, 1.6_174E-03,
3.0_518E-04, 9.1_553E-05, 3.3_569E-04, 9.7_656E-04, 1.8_311E-03,
2.0_142E-03, 2.1_057E-03, 1.7_395E-03, 4.5_776E-04, -3.9_673E-04,
4.5_776E-04, 1.0_071E-03, 9.1_553E-05, 4.8_828E-04, 1.1_597E-03,
7.3_242E-04, 9.4_604E-04, 1.8_005E-03, 1.8_311E-03, 8.8_501E-04,
4.2_725E-04, 4.8_828E-04, 7.3_242E-04, 1.0_986E-03, 2.1_057E-03] )
# fmt: on
a : List[str] = self._load_datasamples(1 )
a : Union[str, Any] = SpeechTaFeatureExtractor()
a : str = feature_extractor(lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 9_3680) )
self.assertTrue(torch.allclose(input_values[0, :30] , lowerCAmelCase__ , atol=1E-6 ) )
def __a ( self ) -> Union[str, Any]:
# fmt: off
a : Tuple = torch.tensor(
[-2.6_870, -3.0_104, -3.1_356, -3.5_352, -3.0_044, -3.0_353, -3.4_719, -3.6_777,
-3.1_520, -2.9_435, -2.6_553, -2.8_795, -2.9_944, -2.5_921, -3.0_279, -3.0_386,
-3.0_864, -3.1_291, -3.2_353, -2.7_444, -2.6_831, -2.7_287, -3.1_761, -3.1_571,
-3.2_726, -3.0_582, -3.1_007, -3.4_533, -3.4_695, -3.0_998] )
# fmt: on
a : Dict = self._load_datasamples(1 )
a : Tuple = SpeechTaFeatureExtractor()
a : Optional[int] = feature_extractor(audio_target=lowerCAmelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) )
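
# Note on the two integration checks above (an addition): the first feeds raw
# waveforms (the input path), the second mel-spectrogram targets (the
# audio_target path); both pin the first 30 values computed from a LibriSpeech
# dummy sample against hard-coded references.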
def solution(pence: int = 200) -> int:
    '''simple docstring'''
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
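    # Worked micro-example of the same DP (an addition): with coins {1, 2}
    # there are exactly three ways to make 4 pence: 1+1+1+1, 1+1+2, and 2+2.
    ways = [1, 0, 0, 0, 0]
    for coin in (1, 2):
        for i in range(coin, 5):
            ways[i] += ways[i - coin]
    assert ways[4] == 3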
"""simple docstring"""
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example: dict) -> dict:
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split='''train''')
print(F'''Dataset loaded in {time.time()-t_start:.2f}s''')

t_start = time.time()
ds = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F'''Dataset tokenized in {time.time()-t_start:.2f}s''')
t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F'''Data pushed to the hub in {time.time()-t_start:.2f}s''')
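
# Note (an addition): `push_to_hub` needs an authenticated Hugging Face login
# (e.g. via `huggingface-cli login`); without it the final upload step fails.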
"""simple docstring"""
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    '''simple docstring'''
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    '''simple docstring'''
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable"
            " to int.")
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than" " len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
if __name__ == "__main__":
    entry_msg = '''Provide a string that I will generate its BWT transform: '''
    s = input(entry_msg).strip()
    result = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result["bwt_string"]}\''''
)
    original_string = reverse_bwt(result['''bwt_string'''], result['''idx_original_string'''])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
F'''we get original string \'{original_string}\''''
)
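
    # Round-trip check (an addition): reversing the transform must recover s.
    assert original_string == s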
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
a : List[Any] = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
a : Union[str, Any] = TaTokenizerFast
a : Union[str, Any] = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = [
'''MT5EncoderModel''',
'''MT5ForConditionalGeneration''',
'''MT5ForQuestionAnswering''',
'''MT5Model''',
'''MT5PreTrainedModel''',
'''MT5Stack''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[Any] = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model''']
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
a : List[Any] = _LazyModule(
__name__,
globals()['''__file__'''],
_import_structure,
extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast},
module_spec=__spec__,
)
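
# Usage note (an addition): because of `_LazyModule`, importing this package
# is cheap; heavy submodules are resolved only on first attribute access, so
# e.g. `from transformers.models.mt5 import MT5Config` triggers the import of
# the configuration module lazily.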
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __UpperCamelCase ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
a : Optional[int] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : str = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : List[str] = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> List[Any]:
a : Optional[Any] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Tuple:
a : Tuple = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ ) )
def __a ( self ) -> Dict:
a : Dict = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Dict = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
a : List[Any] = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Any = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> int:
# pass variant but use the non-variant filenames
a : int = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
a : Tuple = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : str = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Any = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> str:
a : Union[str, Any] = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> List[str]:
# pass variant but use the non-variant filenames
a : Optional[int] = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
a : str = "fp16"
self.assertTrue(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
def __a ( self ) -> Optional[Any]:
a : Any = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
a : Optional[int] = "fp16"
self.assertFalse(is_safetensors_compatible(lowerCAmelCase__ , variant=lowerCAmelCase__ ) )
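

# The invariant exercised above, stated once (an addition, a paraphrase rather
# than the library implementation): a checkpoint is safetensors-compatible
# roughly when every PyTorch `.bin` weight file has a matching `.safetensors`
# counterpart (variant-aware), which is why removing a single counterpart
# flips the expected result to False.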
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 10 , _lowercase : int = 1000 , _lowercase : bool = True ) ->int:
'''simple docstring'''
assert (
isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
and isinstance(_lowercase , _lowercase )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def _SCREAMING_SNAKE_CASE ( _lowercase : int , _lowercase : int ) ->int:
'''simple docstring'''
return int((number_a + number_a) / 2 )
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    '''simple docstring'''
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'
    if lower > higher:
        raise ValueError("argument value of lower must be less than or equal to higher")
    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break
    print(f"""guess the number : {last_numbers[-1]}""")
    print(f"""details : {last_numbers!s}""")
def main() -> None:
    '''simple docstring'''
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
main()
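# Worked example (comments only): for guess_the_number(0, 16, 7) the bisection
# needs at most ceil(log2(16)) probes.  Trace of `last_numbers`:
#   avg(0, 16) = 8  -> "high", so last_highest = 8
#   avg(0, 8)  = 4  -> "low",  so last_lowest = 4
#   avg(4, 8)  = 6  -> "low",  so last_lowest = 6
#   avg(6, 8)  = 7  -> "same", stop
# Output: "guess the number : 7" and "details : [8, 4, 6, 7]".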
"""simple docstring"""
def get_highest_set_bit_position(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
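# Illustration: for positive inputs the loop computes floor(log2(number)) + 1,
# i.e. the 1-indexed position of the most significant set bit:
#   get_highest_set_bit_position(1)   # -> 1  (0b1)
#   get_highest_set_bit_position(5)   # -> 3  (0b101)
#   get_highest_set_bit_position(32)  # -> 6  (0b100000)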
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
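# Example (illustrative): onnxruntime reports element types as strings such as
# "tensor(float)"; the table above recovers the matching NumPy dtype, e.g. for
# casting session outputs:
#   out_type = session.get_outputs()[0].type   # "tensor(float)"
#   np_dtype = ORT_TO_NP_TYPE[out_type]        # np.float32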
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path: Union[str, Path], provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"

        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory: Union[str, Path], file_name: Optional[str] = None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME

        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""")
            return

        os.makedirs(save_directory, exist_ok=True)

        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(cls, model_id: Union[str, Path], use_auth_token: Optional[Union[bool, str]] = None, revision: Optional[str] = None, force_download: bool = False, cache_dir: Optional[str] = None, file_name: Optional[str] = None, provider: Optional[str] = None, sess_options: Optional["ort.SessionOptions"] = None, **kwargs, ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id, filename=model_file_name, use_auth_token=use_auth_token, revision=revision, cache_dir=cache_dir, force_download=force_download, )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(cls, model_id: Union[str, Path], force_download: bool = True, use_auth_token: Optional[str] = None, cache_dir: Optional[str] = None, **model_kwargs, ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")

        return cls._from_pretrained(
            model_id=model_id, revision=revision, cache_dir=cache_dir, force_download=force_download, use_auth_token=use_auth_token, **model_kwargs, )
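# Hedged usage sketch (the repo id below is a placeholder, not a real
# checkpoint): load one exported ONNX component and run inference by passing
# named arrays as keyword arguments.
#
#   unet = OnnxRuntimeModel.from_pretrained(
#       "some-user/some-onnx-unet",  # hypothetical repo id or local directory
#       provider="CPUExecutionProvider",
#   )
#   noise_pred = unet(sample=latents, timestep=t, encoder_hidden_states=text_emb)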
"""simple docstring"""
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
' StableDiffusionImg2ImgPipeline` instead.'
)
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"""{class_data_dir}/images""", exist_ok=True)
    if len(list(Path(f"""{class_data_dir}/images""").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"""{class_data_dir}/caption.txt""", "w") as f1, open(f"""{class_data_dir}/urls.txt""", "w") as f2, open(
        f"""{class_data_dir}/images.txt""", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"""{class_data_dir}/images/{total}.jpg""", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"""{class_data_dir}/images/{total}.jpg""" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
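# Example invocation (illustrative values):
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200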
"""simple docstring"""
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training")
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously")

        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, "summarization")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir) / "hparams.pkl"
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size

        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}

        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"""target_lens: {self.target_lens}"""
        assert self.target_lens["train"] <= self.target_lens["test"], f"""target_lens: {self.target_lens}"""
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: dict) -> Dict[str, List[str]]:
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / "text_batch.json")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json")
        self.already_saved_batch = True
        return readable_batch

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)

            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id
            )
        return (loss,)
    @property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id

    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}

    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {f"""{prefix}_avg_{k}""": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            f"""{prefix}_loss""": loss,
            f"""{prefix}_{self.val_metric}""": metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)

    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()

        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"], attention_mask=batch["attention_mask"], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["labels"])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, preds))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics

    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)

    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="test")
    def get_dataset(self, type_path) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, )
        return dataset

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)

        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, )

    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            "--max_source_length", default=1024, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--max_target_length", default=56, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--val_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument(
            "--test_max_target_length", default=142, type=int, help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ), )
        parser.add_argument("--freeze_encoder", action="store_true")
        parser.add_argument("--freeze_embeds", action="store_true")
        parser.add_argument("--sortish_sampler", action="store_true", default=False)
        parser.add_argument("--overwrite_output_dir", action="store_true", default=False)
        parser.add_argument("--max_tokens_per_batch", type=int, default=None)
        parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default")
        parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.")
        parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.")
        parser.add_argument(
            "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all.")
        parser.add_argument("--label_smoothing", type=float, default=0.0, required=False)
        parser.add_argument("--src_lang", type=str, default="", required=False)
        parser.add_argument("--tgt_lang", type=str, default="", required=False)
        parser.add_argument("--eval_beams", type=int, default=None, required=False)
        parser.add_argument(
            "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None])
        parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens")
        parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save")
        parser.add_argument(
            "--early_stopping_patience", type=int, default=-1, required=False, help=(
                "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
                " val_check_interval will effect it."
            ), )
        return parser
class TranslationModule(SummarizationModule):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    '''simple docstring'''
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)

    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("/tmp")
        or str(args.output_dir).startswith("/var")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("WANDB_PROJECT", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=f"""hf_{dataset}""")

    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == "loss"
    trainer: pl.Trainer = generic_train(
        model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better), early_stopping_callback=es_callback, logger=logger, )
    pickle_save(model.hparams, model.output_dir / "hparams.pkl")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = ""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)

    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
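# Standalone sketch of the label-smoothing math used in `_step` above (the real
# helper is `utils.label_smoothed_nll_loss`; this version is illustrative only):
# mix the NLL of the gold token with the mean NLL over the whole vocabulary,
# zeroing out padding positions before reducing.
def _label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target.clamp(min=0))
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss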
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`.")
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate.", )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.", )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.", )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.", )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup.", )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times.", )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False.", )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.", )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it.")

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    '''simple docstring'''
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"""accelerate=={args.accelerate_version}"""

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"""Running {' '.join(cmd)}""")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")


def main():
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
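# Illustration (made-up flag values): running
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug
# makes the launcher above print roughly:
#   Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a \
#       --command cd /usr/share; pip install accelerate -U; echo hello --worker all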
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None


class BinaryTreeNodeSum:
    def __init__(self, tree: Node) -> None:
        self.tree = tree

    def depth_first_search(self, node: Node | None) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
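def _example_tree_sum() -> int:
    # Usage example (added for illustration): a three-node tree whose DFS
    # value sum is 10 + 5 + (-3) = 12; the iterator yields that total once.
    tree = Node(10)
    tree.left = Node(5)
    tree.right = Node(-3)
    return next(iter(BinaryTreeNodeSum(tree)))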
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/nllb-large-en-ro''': 1024,
'''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , src_lang=None , tgt_lang=None , additional_special_tokens=None , legacy_behaviour=False , **kwargs , ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch( self , src_texts: List[str] , src_lang: str = "eng_Latn" , tgt_texts: Optional[List[str]] = None , tgt_lang: str = "fra_Latn" , **kwargs , ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]
        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str, pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str, special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)), )
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
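# Hedged usage sketch (checkpoint id comes from the pretrained maps above):
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#   )
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   # At generation time, force the target language as the first decoder token:
#   # model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"])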
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps: jnp.ndarray , embedding_dim: int , freq_shift: float = 1 , min_timescale: float = 1 , max_timescale: float = 1.0e4 , flip_sin_to_cos: bool = False , scale: float = 1.0 , ) -> jnp.ndarray:
    '''simple docstring'''
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift)
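def _example_timestep_embedding() -> jnp.ndarray:
    # Small shape check for the helper above (added for illustration): embedding
    # 4 timesteps into 32 dimensions yields a (4, 32) array whose two halves
    # hold sines and cosines (order depends on `flip_sin_to_cos`).
    timesteps = jnp.arange(4)
    emb = get_sinusoidal_embeddings(timesteps, embedding_dim=32)
    assert emb.shape == (4, 32)
    return emb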
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 100 , s_noise: float = 1.007 , s_churn: float = 80 , s_min: float = 0.05 , s_max: float = 50 , ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , return_dict: bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct( self , model_output: torch.FloatTensor , sigma_hat: float , sigma_prev: float , sample_hat: torch.FloatTensor , sample_prev: torch.FloatTensor , derivative: torch.FloatTensor , return_dict: bool = True , ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
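# Hedged usage sketch (mirrors the KarrasVe pipeline loop; `model`, shapes and
# variable names here are placeholders, not part of this module):
#
#   scheduler.set_timesteps(num_inference_steps)
#   sample = torch.randn(shape) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       sigma = scheduler.schedule[t]
#       sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#       # 1. raise noise level, 2. Euler step, 3. optional 2nd-order correction
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
#       step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#       if sigma_prev != 0:
#           model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
#           step_output = scheduler.step_correct(
#               model_output, sigma_hat, sigma_prev, sample_hat,
#               step_output.prev_sample, step_output.derivative,
#           )
#       sample = step_output.prev_sample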
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel

do_only_config = False
do_only_weights = True
do_only_renaming = False

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
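# Note on the weight renaming above (illustrative example): a legacy checkpoint
# key such as "downsample_blocks.0.resnets.0.conv1.weight" has its first path
# segment looked up in `key_parameters_to_change` and becomes
# "down_blocks.0.resnets.0.conv1.weight" in `new_state_dict`.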
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                raise
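# Usage sketch: this builder backs `load_dataset("csv", ...)`; any CsvConfig
# field can be forwarded as a keyword argument (file names are placeholders):
#
#   from datasets import load_dataset
#   ds = load_dataset("csv", data_files={"train": "train.csv"}, sep=";", skiprows=1)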
| 31
| 0
|
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."


def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file", type=str, default=None, help="Path to the config file to use for accelerate."
    )
    config_args.add_argument(
        "--tpu_name", default=None, help="The name of the TPU to use. If not specified, will use the TPU specified in the config file."
    )
    config_args.add_argument(
        "--tpu_zone", default=None, help="The zone of the TPU to use. If not specified, will use the zone specified in the config file."
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha", action="store_true", help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`."
    )
    pod_args.add_argument(
        "--command_file", default=None, help="The path to the file containing the commands to run on the pod on startup."
    )
    pod_args.add_argument(
        "--command", action="append", nargs="+", help="A command to run on the pod. Can be passed multiple times."
    )
    pod_args.add_argument(
        "--install_accelerate", action="store_true", help="Whether to install accelerate on the pod. Defaults to False."
    )
    pod_args.add_argument(
        "--accelerate_version", default="latest", help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub."
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]

    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
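# Sketch of the command assembly above (assumption: standalone illustration; the TPU
# name, zone, and training command are placeholders):
#
# new_cmd = ["cd /usr/share", "pip install accelerate -U", "python train.py"]
# command = "; ".join(new_cmd)
# cmd = ["gcloud", "compute", "tpus", "tpu-vm", "ssh", "my-tpu",
#        "--zone", "us-central2-b", "--command", command, "--worker", "all"]
# # -> gcloud compute tpus tpu-vm ssh my-tpu --zone us-central2-b
# #    --command "cd /usr/share; pip install accelerate -U; python train.py" --worker all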
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
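# Usage sketch for the helper above (assumption: illustrative, CUDA-only; `pipe` and its
# arguments are placeholders):
#
# _start_torch_memory_measurement()
# _ = pipe(prompt_embeds=prompt_embeds, num_inference_steps=2, output_type="np")
# peak_bytes = torch.cuda.max_memory_allocated()  # peak allocation since the reset above
# print(f"peak CUDA memory: {peak_bytes / 10**9:.2f} GB")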
"""simple docstring"""
def simplify(current_set: list[list]) -> list[list]:
    # Divide each row by the magnitude of its first term --> creates a 'unit' matrix
    duplicate_set = current_set.copy()
    for row_index, row in enumerate(duplicate_set):
        magnitude = row[0]
        for column_index, column in enumerate(row):
            if magnitude == 0:
                current_set[row_index][column_index] = column
                continue
            current_set[row_index][column_index] = column / magnitude
    # Subtract to cancel term
    first_row = current_set[0]
    final_set = [first_row]
    current_set = current_set[1::]
    for row in current_set:
        temp_row = []
        # If first term is 0, it is already in form we want, so we preserve it
        if row[0] == 0:
            final_set.append(row)
            continue
        for column_index in range(len(row)):
            temp_row.append(first_row[column_index] - row[column_index])
        final_set.append(temp_row)
    # Create next recursion iteration set
    if len(final_set[0]) != 3:
        first_row = final_set[0]
        current_first_column = []
        next_iteration = []
        for row in final_set[1::]:
            current_first_column.append(row[0])
            next_iteration.append(row[1::])
        resultant = simplify(next_iteration)
        for i in range(len(resultant)):
            resultant[i].insert(0, current_first_column[i])
        resultant.insert(0, first_row)
        final_set = resultant
    return final_set
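# Example of one elimination pass (hand-checked illustration):
# simplify([[2, 1, 4], [1, 2, 5]]) divides each row by its leading coefficient and
# subtracts the first row from the rest, returning [[1.0, 0.5, 2.0], [0.0, -1.5, -3.0]].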
def solve_simultaneous(equations: list[list]) -> list:
    """
    Solve a system of n linear equations given as n rows of n + 1 numbers
    (coefficients followed by the constant term).

    >>> solve_simultaneous([[2, 1, 4], [1, 2, 5]])
    [1.0, 2.0]
    """
    if len(equations) == 0:
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    _length = len(equations) + 1
    if any(len(item) != _length for item in equations):
        raise IndexError("solve_simultaneous() requires n lists of length n+1")
    for row in equations:
        if any(not isinstance(column, (int, float)) for column in row):
            raise ValueError("solve_simultaneous() requires lists of integers")
    if len(equations) == 1:
        return [equations[0][-1] / equations[0][0]]
    data_set = equations.copy()
    if any(0 in row for row in data_set):
        temp_data = data_set.copy()
        full_row = []
        for row_index, row in enumerate(temp_data):
            if 0 not in row:
                full_row = data_set.pop(row_index)
                break
        if not full_row:
            raise ValueError("solve_simultaneous() requires at least 1 full equation")
        data_set.insert(0, full_row)
    useable_form = data_set.copy()
    simplified = simplify(useable_form)
    simplified = simplified[::-1]
    solutions: list = []
    for row in simplified:
        current_solution = row[-1]
        if not solutions:
            if row[-2] == 0:
                solutions.append(0)
                continue
            solutions.append(current_solution / row[-2])
            continue
        temp_row = row.copy()[: len(row) - 1 :]
        while temp_row[0] == 0:
            temp_row.pop(0)
        if len(temp_row) == 0:
            solutions.append(0)
            continue
        temp_row = temp_row[1::]
        temp_row = temp_row[::-1]
        for column_index, column in enumerate(temp_row):
            current_solution -= column * solutions[column_index]
        solutions.append(current_solution)
    final = []
    for item in solutions:
        final.append(float(round(item, 5)))
    return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    eq = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
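# A simplified sketch of the rule these tests exercise (assumption: this mirrors the intent
# of `is_safetensors_compatible`, not its actual implementation, and ignores `variant`
# handling): every component folder that ships a PyTorch ".bin" weight must also ship a
# ".safetensors" file.
def naive_is_safetensors_compatible(filenames):
    import os
    from collections import defaultdict

    formats_by_dir = defaultdict(set)
    for filename in filenames:
        component = os.path.dirname(filename)
        if filename.endswith(".bin"):
            formats_by_dir[component].add("bin")
        elif filename.endswith(".safetensors"):
            formats_by_dir[component].add("safetensors")
    return all("safetensors" in formats for formats in formats_by_dir.values() if "bin" in formats)


# e.g. naive_is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"]) -> False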