code
stringlengths 82
53.2k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# (The original bound this dict to a throwaway name, so the `_import_structure`
# passed to _LazyModule below was a NameError.)
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

# The modeling submodule is only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,  # was misspelled "NllbMoeTopaRouter" (ImportError)
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first
    # attribute access (the original assigned the proxy to a variable instead).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89
|
"""Project Euler 30: sum of all numbers that equal the sum of the fifth powers
of their digits. https://projecteuler.net/problem=30
"""

# Precompute digit-char -> digit**5 so the hot loop is one dict lookup per char.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of *number*.

    >>> digits_fifth_powers_sum(1234)
    1300
    """
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers in [1000, 1000000) equal to the sum of
    the fifth powers of their own digits.

    Single-digit numbers are excluded by the problem statement; 7-digit
    numbers cannot exceed 7 * 9**5 < 1000000, so the upper bound suffices.
    """
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number)
    )


if __name__ == "__main__":
    print(solution())
| 584
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class snake_case__ ( UpperCamelCase):
    """Processor bundling a BridgeTower image processor and a Roberta tokenizer
    into a single callable: text -> token ids, images -> pixel_values/pixel_mask.
    """

    # ProcessorMixin reads these three class attributes; the original bound all
    # of them to the same name `a_`, leaving the mixin unconfigured. The
    # original __call__ also repeated the parameter name `_A`, which is a
    # SyntaxError, and updated an undefined `encoding`.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Tokenize `text` and preprocess `images`, returning one merged BatchEncoding."""
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        """Tokenizer + image-processor input names, deduplicated preserving order."""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 704
|
"""Project Euler 205: https://projecteuler.net/problem=205

Peter rolls nine 4-sided dice, Colin rolls six 6-sided dice; find the
probability, to seven decimal places, that Peter's total beats Colin's.
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """Return a list where index t holds the number of ordered rolls totalling t."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    # Enumerate every ordered roll and tally its total.
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Return P(Peter's total > Colin's total), rounded to 7 decimal places."""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9
    )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6
    )

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Colin loses with any total strictly below Peter's.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    return round(peter_win_probability, ndigits=7)


if __name__ == "__main__":
    print(f"{solution() = }")
| 216
| 0
|
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase__ = tuple[int, int, int]
lowerCAmelCase__ = tuple[str, str, str]
# used alphabet --------------------------
# from string.ascii_uppercase
lowerCAmelCase__ = '''ABCDEFGHIJKLMNOPQRSTUVWXYZ'''
# -------------------------- default selection --------------------------
# rotors --------------------------
lowerCAmelCase__ = '''EGZWVONAHDCLFQMSIPJBYUKXTR'''
lowerCAmelCase__ = '''FOBHMDKEXQNRAULPGSJVTYICZW'''
lowerCAmelCase__ = '''ZJXESIUQLHAVRMDOYGTNFWPBKC'''
# reflector --------------------------
lowerCAmelCase__ = {
'''A''': '''N''',
'''N''': '''A''',
'''B''': '''O''',
'''O''': '''B''',
'''C''': '''P''',
'''P''': '''C''',
'''D''': '''Q''',
'''Q''': '''D''',
'''E''': '''R''',
'''R''': '''E''',
'''F''': '''S''',
'''S''': '''F''',
'''G''': '''T''',
'''T''': '''G''',
'''H''': '''U''',
'''U''': '''H''',
'''I''': '''V''',
'''V''': '''I''',
'''J''': '''W''',
'''W''': '''J''',
'''K''': '''X''',
'''X''': '''K''',
'''L''': '''Y''',
'''Y''': '''L''',
'''M''': '''Z''',
'''Z''': '''M''',
}
# -------------------------- extra rotors --------------------------
lowerCAmelCase__ = '''RMDJXFUWGISLHVTCQNKYPBEZOA'''
lowerCAmelCase__ = '''SGLCPQWZHKXAREONTFBVIYJUDM'''
lowerCAmelCase__ = '''HVSICLTYKQUBXDWAJZOMFGPREN'''
lowerCAmelCase__ = '''RZWQHFMVDBKICJLNTUXAGYPSOE'''
lowerCAmelCase__ = '''LFKIJODBEGAMQPXVUHYSTCZRWN'''
lowerCAmelCase__ = '''KOAEGVDHXPQZMLFTYWJNBRCIUS'''
def snake_case_ ( A_ : RotorPositionT, A_ : RotorSelectionT, A_ : str ):
'''simple docstring'''
if (unique_rotsel := len(set(A_ ) )) < 3:
_lowerCamelCase : List[Any] = F'''Please use 3 unique rotors (not {unique_rotsel})'''
raise Exception(A_ )
# Checks if rotor positions are valid
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = rotpos
if not 0 < rotorposa <= len(A_ ):
_lowerCamelCase : Any = F'''First rotor position is not within range of 1..26 ({rotorposa}'''
raise ValueError(A_ )
if not 0 < rotorposa <= len(A_ ):
_lowerCamelCase : Union[str, Any] = F'''Second rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(A_ )
if not 0 < rotorposa <= len(A_ ):
_lowerCamelCase : Optional[Any] = F'''Third rotor position is not within range of 1..26 ({rotorposa})'''
raise ValueError(A_ )
# Validates string and returns dict
_lowerCamelCase : Optional[int] = _plugboard(A_ )
return rotpos, rotsel, pbdict
def snake_case_ ( A_ : str ):
'''simple docstring'''
if not isinstance(A_, A_ ):
_lowerCamelCase : List[Any] = F'''Plugboard setting isn\'t type string ({type(A_ )})'''
raise TypeError(A_ )
elif len(A_ ) % 2 != 0:
_lowerCamelCase : Optional[Any] = F'''Odd number of symbols ({len(A_ )})'''
raise Exception(A_ )
elif pbstring == "":
return {}
pbstring.replace(''' ''', '''''' )
# Checks if all characters are unique
_lowerCamelCase : Optional[Any] = set()
for i in pbstring:
if i not in abc:
_lowerCamelCase : int = F'''\'{i}\' not in list of symbols'''
raise Exception(A_ )
elif i in tmppbl:
_lowerCamelCase : Optional[int] = F'''Duplicate symbol ({i})'''
raise Exception(A_ )
else:
tmppbl.add(A_ )
del tmppbl
# Created the dictionary
_lowerCamelCase : int = {}
for j in range(0, len(A_ ) - 1, 2 ):
_lowerCamelCase : List[str] = pbstring[j + 1]
_lowerCamelCase : str = pbstring[j]
return pb
def snake_case_ ( A_ : str, A_ : RotorPositionT, A_ : RotorSelectionT = (rotora, rotora, rotora), A_ : str = "", ):
'''simple docstring'''
_lowerCamelCase : List[str] = text.upper()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = _validator(
A_, A_, plugb.upper() )
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Dict = rotor_position
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Union[str, Any] = rotor_selection
rotorposa -= 1
rotorposa -= 1
rotorposa -= 1
_lowerCamelCase : Any = []
# encryption/decryption process --------------------------
for symbol in text:
if symbol in abc:
# 1st plugboard --------------------------
if symbol in plugboard:
_lowerCamelCase : List[str] = plugboard[symbol]
# rotor ra --------------------------
_lowerCamelCase : Optional[Any] = abc.index(A_ ) + rotorposa
_lowerCamelCase : Any = rotora[index % len(A_ )]
# rotor rb --------------------------
_lowerCamelCase : int = abc.index(A_ ) + rotorposa
_lowerCamelCase : int = rotora[index % len(A_ )]
# rotor rc --------------------------
_lowerCamelCase : str = abc.index(A_ ) + rotorposa
_lowerCamelCase : Optional[int] = rotora[index % len(A_ )]
# reflector --------------------------
# this is the reason you don't need another machine to decipher
_lowerCamelCase : Optional[Any] = reflector[symbol]
# 2nd rotors
_lowerCamelCase : List[Any] = abc[rotora.index(A_ ) - rotorposa]
_lowerCamelCase : Any = abc[rotora.index(A_ ) - rotorposa]
_lowerCamelCase : int = abc[rotora.index(A_ ) - rotorposa]
# 2nd plugboard
if symbol in plugboard:
_lowerCamelCase : Optional[int] = plugboard[symbol]
# moves/resets rotor positions
rotorposa += 1
if rotorposa >= len(A_ ):
_lowerCamelCase : Optional[Any] = 0
rotorposa += 1
if rotorposa >= len(A_ ):
_lowerCamelCase : Dict = 0
rotorposa += 1
if rotorposa >= len(A_ ):
_lowerCamelCase : Dict = 0
# else:
# pass
# Error could be also raised
# raise ValueError(
# 'Invalid symbol('+repr(symbol)+')')
result.append(A_ )
return "".join(A_ )
if __name__ == "__main__":
lowerCAmelCase__ = '''This is my Python script that emulates the Enigma machine from WWII.'''
lowerCAmelCase__ = (1, 1, 1)
lowerCAmelCase__ = '''pictures'''
lowerCAmelCase__ = (rotora, rotora, rotora)
lowerCAmelCase__ = enigma(message, rotor_pos, rotor_sel, pb)
print('''Encrypted message:''', en)
print('''Decrypted message:''', enigma(en, rotor_pos, rotor_sel, pb))
| 83
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,  # duck-typed problem: needs .score(), .get_neighbors(), .x, .y
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Simulated annealing: hill climbing that sometimes accepts worse moves.

    Worse neighbors are accepted with probability e**(change/temperature); the
    temperature decays geometrically by `rate_of_decrease` each iteration.
    Stops when the temperature drops below `threshold_temp` or no acceptable
    neighbor exists, and returns the best state seen.
    (The original definition repeated the parameter name `A_`, a SyntaxError,
    and bound all its state to `_lowerCamelCase` while reading real names.)
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                # acceptance probability for a worsening move
                probability = (math.e) ** (change / current_temp)
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
    # Demo: minimize/maximize two test functions over bounded domains.
    # (The original bound every result to `lowerCAmelCase__` and both test
    # functions to `snake_case_`, then printed the undefined `local_min`.)

    def test_f1(x, y):
        """Convex bowl: x^2 + y^2."""
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_min = simulated_annealing(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
    )

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_f1)
    local_max = simulated_annealing(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        "The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
        f"and 50 > y > - 5 found via hill climbing: {local_max.score()}"
    )

    def test_f2(x, y):
        """3*x^2 - 6*y (unbounded below in y)."""
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_min = simulated_annealing(prob, find_max=False, visualization=True)
    print(
        "The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_min.score()}"
    )

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_f2)
    local_max = simulated_annealing(prob, find_max=True, visualization=True)
    print(
        "The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
        f"{local_max.score()}"
    )
| 83
| 1
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def UpperCamelCase_ ( lowerCAmelCase__ ):
    """Return the argument unchanged if it is already iterable, otherwise
    duplicate it into a 2-tuple (the usual ``to_2tuple`` helper for sizes).
    """
    if isinstance(lowerCAmelCase__ , collections.abc.Iterable ):
        # Original returned `x`, a name that does not exist here (NameError).
        return lowerCAmelCase__
    # Scalar: promote to a square (x, x) pair.
    return (lowerCAmelCase__, lowerCAmelCase__)
@require_tf
class __A :
    '''Shared checks for TF vision+text dual-encoder model combinations.

    NOTE(review): obfuscation damage throughout this class —
    * every method is named ``SCREAMING_SNAKE_CASE__``, so each definition
      shadows the previous one and only the last binding survives;
    * results bound to ``_lowerCAmelCase`` were originally tuple-unpacked into
      distinct names (e.g. ``vision_model, text_model = ...``, ``model``,
      ``output``, ``out_1``/``out_2``), so the bodies read undefined names;
    * ``out_a - out_a`` is identically zero — the two outputs originally had
      different names. Code kept byte-identical; issues flagged only.
    '''
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
        # abstract: concrete testers return (vision_model, text_model)
        pass
    def SCREAMING_SNAKE_CASE__ ( self ):
        # abstract: concrete testers build the config/inputs kwargs dict
        pass
    def SCREAMING_SNAKE_CASE__ ( self ):
        # abstract: concrete testers return (pretrained_model, inputs)
        pass
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
        # check: model built from merged sub-configs emits projection-dim embeds
        _lowerCAmelCase : Tuple = VisionTextDualEncoderConfig.from_vision_text_configs(_snake_case , _snake_case )
        _lowerCAmelCase : Tuple = TFVisionTextDualEncoderModel(_snake_case )
        _lowerCAmelCase : Any = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
        # NOTE(review): `model`, `output`, `config` are never bound above — lost unpacking
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) )
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
        # check: model built from instantiated sub-models emits projection-dim embeds
        _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case )
        _lowerCAmelCase : Any = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
        _lowerCAmelCase : Union[str, Any] = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
        # check: from_vision_text_pretrained(**kwargs) path produces same shapes
        _lowerCAmelCase : Union[str, Any] = self.get_vision_text_model(_snake_case , _snake_case )
        _lowerCAmelCase : Any = {"vision_model": vision_model, "text_model": text_model}
        _lowerCAmelCase : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_snake_case )
        _lowerCAmelCase : Dict = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
        self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) )
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
        # check: save_pretrained / from_pretrained round-trip preserves outputs to 1e-5
        _lowerCAmelCase : List[str] = self.get_vision_text_model(_snake_case , _snake_case )
        _lowerCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
        _lowerCAmelCase : Tuple = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
        _lowerCAmelCase : List[str] = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_snake_case )
            _lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
            _lowerCAmelCase : Dict = model(input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case )
            _lowerCAmelCase : Optional[int] = after_output[0].numpy()
            # NOTE(review): `out_a - out_a` is always 0 — originally two distinct outputs
            _lowerCAmelCase : Any = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_snake_case , 1E-5 )
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
        # check: attention tensors have the expected per-layer shapes (ViT: patches + [CLS])
        _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case )
        _lowerCAmelCase : Tuple = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
        _lowerCAmelCase : List[str] = model(
            input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
        _lowerCAmelCase : Optional[Any] = output.vision_model_output.attentions
        self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        _lowerCAmelCase : List[Any] = to_atuple(vision_model.config.image_size )
        _lowerCAmelCase : Optional[int] = to_atuple(vision_model.config.patch_size )
        _lowerCAmelCase : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        _lowerCAmelCase : Any = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        _lowerCAmelCase : int = output.text_model_output.attentions
        self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case ):
        # assert |a - b|_max <= tol (params originally named a, b, tol)
        _lowerCAmelCase : List[str] = np.abs((a - b) ).max()
        self.assertLessEqual(_snake_case , _snake_case , F"""Difference between torch and flax is {diff} (>= {tol}).""" )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # test entry point: dual-encoder model check
        _lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**_snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # test entry point: from-configs check
        _lowerCAmelCase : Any = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # test entry point: from_vision_text_pretrained check
        _lowerCAmelCase : Any = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # test entry point: save/load round-trip check
        _lowerCAmelCase : int = self.prepare_config_and_inputs()
        self.check_save_load(**_snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # test entry point: attention-shape check
        _lowerCAmelCase : int = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_snake_case )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        # slow test: pretrained model survives a save/load round-trip (outputs within 1e-5)
        _lowerCAmelCase : Optional[Any] = self.get_pretrained_model_and_inputs()
        _lowerCAmelCase : List[str] = model_a(**_snake_case )
        _lowerCAmelCase : List[Any] = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_snake_case )
            _lowerCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_snake_case )
            _lowerCAmelCase : List[str] = model_a(**_snake_case )
            _lowerCAmelCase : Any = after_outputs[0].numpy()
            # NOTE(review): `out_a - out_a` is always 0 — lost distinct names
            _lowerCAmelCase : Dict = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_snake_case , 1E-5 )
@require_tf
class __A ( snake_case__ ,unittest.TestCase ):
    '''ViT + BERT concrete tester for the dual-encoder mixin above.

    NOTE(review): all three methods share the name ``SCREAMING_SNAKE_CASE__``
    (only the last binding survives) and the ``_lowerCAmelCase`` assignments
    were originally distinct names / tuple unpackings, so the bodies read
    undefined names. Code kept byte-identical; flagged only.
    '''
    def SCREAMING_SNAKE_CASE__ ( self ):
        # builds a tiny pretrained ViT+BERT dual encoder plus random inputs
        _lowerCAmelCase : List[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" )
        _lowerCAmelCase : Optional[int] = 13
        _lowerCAmelCase : Optional[int] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        _lowerCAmelCase : Optional[int] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        _lowerCAmelCase : Optional[int] = random_attention_mask([batch_size, 4] )
        _lowerCAmelCase : Tuple = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
        # wraps the two sub-configs into concrete TF vision/text models
        _lowerCAmelCase : Optional[Any] = TFViTModel(_snake_case , name="vision_model" )
        _lowerCAmelCase : Union[str, Any] = TFBertModel(_snake_case , name="text_model" )
        return vision_model, text_model
    def SCREAMING_SNAKE_CASE__ ( self ):
        # assembles the kwargs dict consumed by the mixin's check_* helpers
        _lowerCAmelCase : List[Any] = TFViTModelTester(self )
        _lowerCAmelCase : List[str] = TFBertModelTester(self )
        _lowerCAmelCase : str = vit_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase : int = bert_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase : Union[str, Any] = vision_config_and_inputs
        # NOTE(review): originally a 7-way tuple unpacking of text_config_and_inputs
        (
            _lowerCAmelCase
        ) : Dict = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class __A ( snake_case__ ,unittest.TestCase ):
    '''DeiT + RoBERTa concrete tester; overrides the attention-shape check
    because DeiT prepends a distillation token ([CLS] + distill = +2).

    NOTE(review): same obfuscation damage as the sibling testers — duplicate
    method names, ``_lowerCAmelCase`` replacing distinct names and tuple
    unpackings. Code kept byte-identical; flagged only.
    '''
    def SCREAMING_SNAKE_CASE__ ( self ):
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        _lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" )
        _lowerCAmelCase : List[Any] = 13
        _lowerCAmelCase : Tuple = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        _lowerCAmelCase : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        _lowerCAmelCase : List[Any] = random_attention_mask([batch_size, 4] )
        _lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , **_snake_case ):
        # attention-shape check with DeiT's num_patches + 2 sequence length
        _lowerCAmelCase : Optional[int] = self.get_vision_text_model(_snake_case , _snake_case )
        _lowerCAmelCase : Optional[int] = TFVisionTextDualEncoderModel(vision_model=_snake_case , text_model=_snake_case )
        _lowerCAmelCase : Tuple = model(
            input_ids=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , output_attentions=_snake_case )
        _lowerCAmelCase : Tuple = output.vision_model_output.attentions
        self.assertEqual(len(_snake_case ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        _lowerCAmelCase : Any = to_atuple(vision_model.config.image_size )
        _lowerCAmelCase : List[str] = to_atuple(vision_model.config.patch_size )
        _lowerCAmelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        _lowerCAmelCase : Dict = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        _lowerCAmelCase : str = output.text_model_output.attentions
        self.assertEqual(len(_snake_case ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
        # wraps the two sub-configs into concrete TF DeiT/RoBERTa models
        _lowerCAmelCase : Any = TFDeiTModel(_snake_case , name="vision_model" )
        _lowerCAmelCase : int = TFRobertaModel(_snake_case , name="text_model" )
        return vision_model, text_model
    def SCREAMING_SNAKE_CASE__ ( self ):
        # assembles the kwargs dict consumed by the mixin's check_* helpers
        _lowerCAmelCase : Optional[int] = TFDeiTModelTester(self )
        _lowerCAmelCase : Union[str, Any] = TFRobertaModelTester(self )
        _lowerCAmelCase : Any = vit_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase : int = vision_config_and_inputs
        # NOTE(review): originally a 7-way tuple unpacking of text_config_and_inputs
        (
            _lowerCAmelCase
        ) : Optional[int] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class __A ( snake_case__ ,unittest.TestCase ):
    '''CLIP vision encoder + BERT concrete tester for the dual-encoder mixin.

    NOTE(review): same obfuscation damage as the sibling testers — duplicate
    method names, ``_lowerCAmelCase`` replacing distinct names and tuple
    unpackings. Code kept byte-identical; flagged only.
    '''
    def SCREAMING_SNAKE_CASE__ ( self ):
        # builds a tiny pretrained CLIP-vision+BERT dual encoder plus random inputs
        _lowerCAmelCase : Optional[Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" )
        _lowerCAmelCase : List[str] = 13
        _lowerCAmelCase : List[Any] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        _lowerCAmelCase : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        _lowerCAmelCase : Tuple = random_attention_mask([batch_size, 4] )
        _lowerCAmelCase : Any = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs
    def SCREAMING_SNAKE_CASE__ ( self , _snake_case , _snake_case ):
        # wraps the two sub-configs into concrete TF CLIP-vision/BERT models
        _lowerCAmelCase : Any = TFCLIPVisionModel(_snake_case , name="vision_model" )
        _lowerCAmelCase : Any = TFBertModel(_snake_case , name="text_model" )
        return vision_model, text_model
    def SCREAMING_SNAKE_CASE__ ( self ):
        # assembles the kwargs dict consumed by the mixin's check_* helpers
        _lowerCAmelCase : Optional[int] = TFCLIPVisionModelTester(self )
        _lowerCAmelCase : Union[str, Any] = TFBertModelTester(self )
        _lowerCAmelCase : str = clip_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase : Dict = bert_model_tester.prepare_config_and_inputs()
        _lowerCAmelCase : Tuple = vision_config_and_inputs
        # NOTE(review): originally a 7-way tuple unpacking of text_config_and_inputs
        (
            _lowerCAmelCase
        ) : Any = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class __A ( unittest.TestCase ):
    '''Slow integration test: clip-italian checkpoint (loaded from PyTorch
    weights) scores two Italian captions against the COCO cats fixture image.

    NOTE(review): `_snake_case` is used as a bare name in the body (e.g.
    `from_pt=_snake_case`, `padding=_snake_case`) — originally literal `True`
    and the locals bound to `_lowerCAmelCase` had real names (`model`,
    `processor`, `image`, `inputs`, `outputs`). Code kept byte-identical.
    '''
    @slow
    def SCREAMING_SNAKE_CASE__ ( self ):
        _lowerCAmelCase : Dict = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=_snake_case )
        _lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" )
        _lowerCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        _lowerCAmelCase : Optional[int] = processor(
            text=["una foto di un gatto", "una foto di un cane"] , images=_snake_case , padding=_snake_case , return_tensors="np" )
        _lowerCAmelCase : List[Any] = model(**_snake_case )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        # pinned expected logits for the fixture image at this revision
        _lowerCAmelCase : Any = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _snake_case , atol=1E-3 ) )
| 703
|
from __future__ import annotations
from typing import Generic, TypeVar
# Bind the generic type parameter under its real name; the original assigned
# the TypeVar to `snake_case`, leaving `T` (used everywhere below) undefined.
T = TypeVar("T")
snake_case = T  # preserve the module-level alias the original bound


class DisjointSetTreeNode(Generic[T]):
    """Node in a disjoint-set forest: payload, parent pointer, union-by-rank rank."""

    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self  # every node starts as its own set representative
        self.rank = 0


class DisjointSetTree(Generic[T]):
    """Disjoint-set (union-find) keyed by payload, with path compression and
    union by rank."""

    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with data as its only member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set data belongs to (with path compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(
        self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]
    ) -> None:
        # helper for union: attach the lower-rank root under the higher-rank one
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge the two disjoint sets containing data1 and data2
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph with Kruskal's MST via the disjoint set above."""

    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if it's not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an undirected edge with the given weight (stored both ways)
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        """Return the minimum spanning tree of this graph as a new graph."""
        # collect each undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        # cheapest edges first (the original lambda referenced undefined `x`)
        edges.sort(key=lambda edge: edge[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation: take the cheapest edge that joins two components
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph


# Preserve the dump's final binding of `__A` (all three classes were named
# `__A`, so only the last one was observable).
__A = GraphUndirectedWeighted
| 587
| 0
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class UpperCAmelCase :
        """Fallback stub used when vision dependencies are unavailable (guarded by
        `is_vision_available()` above).

        NOTE(review): the static method mirrors `PIL.Image.open`'s call shape and does
        nothing; presumably this class stands in for `PIL.Image` — confirm the intended
        name against the upstream test file.
        """

        @staticmethod
        def __UpperCAmelCase ( *__lowerCamelCase : List[str] , **__lowerCamelCase : Any ):
            """No-op stand-in: accepts any arguments and returns None."""
            pass
def snake_case ( lowerCAmelCase_ ) -> None:
    """No-op placeholder: ignores its argument and always returns None.

    The previous return annotation (`Dict`) was wrong — the function
    unconditionally returns None.
    """
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# NOTE: the tests below all read this constant as INVOICE_URL; the previous binding name
# (`snake_case`) left that name undefined.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
    """Pipeline tests for document question answering.

    Restored from the mangled original: every local was bound to `_snake_case` while
    used under its real name (`dqa_pipeline`, `image`, `question`, `word_boxes`, …),
    and every method shared one name, so later definitions shadowed earlier ones and
    the tests were never collected.
    """

    # Model mapping consumed by the pipeline test mixin.
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        """Build a pipeline plus representative example inputs for the mixin runner."""
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        """Run the pipeline on the examples and check the output schema only."""
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionnally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 103
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCAmelCase__ ( tf.keras.layers.Layer ):
    """In-graph (TensorFlow) GPT-2 byte-pair tokenizer layer.

    Wraps `keras_nlp.tokenizers.BytePairTokenizer` so tokenization runs inside the
    TF graph, and optionally pads `input_ids` / `attention_mask` to a fixed length.

    Restored from the mangled original: attribute assignments had been turned into
    throwaway locals (the layer read `self.vocab` etc. that were never set), and all
    methods shared one name; `from_tokenizer` is required by `from_pretrained`, and
    `call` / `get_config` / `from_config` are the Keras layer protocol.
    """

    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        """Build the layer from a GPT-2 vocab dict and BPE merge list."""
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        """Create the layer from an existing (slow) GPT-2 tokenizer."""
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        """Create the layer from a pretrained GPT-2 tokenizer checkpoint."""
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        """Keras deserialization hook: rebuild the layer from `get_config()` output."""
        return cls(**config)

    def get_config(self):
        """Keras serialization hook: everything needed to recreate this layer."""
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        """Tokenize `x`; pad/truncate to `max_length` (or `self.max_length`) when a pad id is set."""
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 512
| 0
|
"""simple docstring"""
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( _A ):
    """Config tester checking MobileNetV2-specific attributes.

    Renamed from the mangled `_UpperCAmelCase`: the test class below instantiates
    `MobileNetVaConfigTester(self, ...)`, which was otherwise undefined.
    """

    def create_and_test_config_common_properties(self):
        # Build a config from the tester's inputs and check the V2-specific knobs exist.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
    """Builds small MobileNetV2 configs/inputs and runs forward-pass shape checks.

    Renamed from the mangled `_UpperCAmelCase`: the test class below instantiates
    `MobileNetVaModelTester(self)`. All `self.x = x` assignments and the
    `config_and_inputs` tuple unpack were restored from their use sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Fine-grained output keeps the full width; otherwise width scales with the multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels, pixel_labels) with random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Forward pass through the bare model; check hidden-state and pooled shapes."""
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Forward pass through the classification head; check logits shape."""
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Forward pass through the segmentation head, with and without labels."""
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _UpperCAmelCase ( _A , _A , unittest.TestCase ):
    """Common model tests for MobileNetV2.

    Restored from the mangled original: the class attributes are the names the
    `ModelTesterMixin` / pipeline mixin actually read (`all_model_classes`,
    `pipeline_model_mapping`, `test_*` flags), and every method had been renamed
    to `A` (so all but the last were shadowed and never ran).
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO cats fixture image used by the integration tests.

    Renamed from the mangled `lowercase`: the integration tests below call
    `prepare_img()`, and the original body returned the undefined name `image`.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
    """Slow integration tests checking MobileNetV2 outputs against pinned values.

    Restored from the mangled original: `self.default_image_processor` is read by
    the tests, so the cached property must carry that name, and the unbound `.to(A)`
    targets are the module-level `torch_device`.
    """

    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
| 710
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE: this list was appended to as `rename_keys` but bound as `__A` in the mangled
# original, which raised NameError on the first append; both bindings restored.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", f"decoder.layers.{i}.sa_qcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", f"decoder.layers.{i}.sa_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qpos_proj.weight", f"decoder.layers.{i}.sa_qpos_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kpos_proj.weight", f"decoder.layers.{i}.sa_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.weight", f"decoder.layers.{i}.sa_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", f"decoder.layers.{i}.ca_qcontent_proj.weight")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", f"decoder.layers.{i}.ca_kcontent_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kpos_proj.weight", f"decoder.layers.{i}.ca_kpos_proj.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.weight", f"decoder.layers.{i}.ca_v_proj.weight"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", f"decoder.layers.{i}.ca_qpos_sine_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", f"decoder.layers.{i}.sa_qcontent_proj.bias")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", f"decoder.layers.{i}.sa_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_qpos_proj.bias", f"decoder.layers.{i}.sa_qpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_kpos_proj.bias", f"decoder.layers.{i}.sa_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.sa_v_proj.bias", f"decoder.layers.{i}.sa_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", f"decoder.layers.{i}.ca_qcontent_proj.bias")
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", f"decoder.layers.{i}.ca_kcontent_proj.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_kpos_proj.bias", f"decoder.layers.{i}.ca_kpos_proj.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.ca_v_proj.bias", f"decoder.layers.{i}.ca_v_proj.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", f"decoder.layers.{i}.ca_qpos_sine_proj.bias")
    )

# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
        ("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
        ("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
        ("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
        ("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
        ("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
        ("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
        ("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
        ("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
        ("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
        ("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
    ]
)
def rename_key(state_dict, old_key, new_key):
    """Move ``state_dict[old_key]`` to ``new_key``, mutating *state_dict* in place.

    Fixes two defects in the previous version: the signature reused one
    parameter name three times (a SyntaxError in Python), and the popped value
    was assigned to a throwaway local instead of being stored under the new
    key. The function is bound to ``rename_key`` because that is the name the
    conversion entry point calls.
    """
    value = state_dict.pop(old_key)
    state_dict[new_key] = value
def rename_backbone_keys(state_dict):
    """Return a copy of *state_dict* with torchvision backbone keys renamed.

    Keys containing ``backbone.0.body`` (the original DETR naming) are mapped
    to the HuggingFace ``backbone.conv_encoder.model`` naming; all other keys
    are copied through unchanged. The previous version computed the renamed
    key but never inserted anything, so it always returned an empty dict.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    """Split each fused encoder self-attention projection into q/k/v entries.

    PyTorch's ``MultiheadAttention`` stores a single fused ``in_proj`` matrix
    and bias per layer; the HF model expects separate ``q_proj``/``k_proj``/
    ``v_proj`` tensors. Mutates *state_dict* in place: pops the fused tensors
    and inserts the three slices. The previous version assigned the slices to
    throwaway locals and never wrote them back.

    NOTE(review): the split size 256 and the 6 encoder layers are hard-coded
    for the Conditional DETR checkpoints (d_model=256) — confirm if reused.
    """
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of the fused input projection layer
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Download and return the standard COCO validation image used for checks.

    The previous version referenced an undefined name for both the URL and the
    ``stream`` flag (a NameError at call time) and was bound to a name the
    conversion entry point never calls.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """Copy/paste/tweak an original Conditional DETR checkpoint into the HF format.

    Loads the original model from torch hub, renames/splits all state-dict
    keys, verifies the converted model against the original on a sample image,
    pushes it to the hub and saves model + image processor locally.

    NOTE(review): this body was reconstructed from the variable reads left in
    the obfuscated original (the assignment targets had been destroyed) — the
    panoptic key-prefixing branch in particular should be diffed against the
    upstream conversion script before use.
    """
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
    # id2label mapping for COCO detection classes
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)
    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    logger.info(f"Converting model {model_name}...")
    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)
    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse the model name and output folder, then convert.
    # NOTE(review): the bindings below were renamed to `__A` (apparently by a
    # mechanical rewrite); `parser` and `args` as referenced are undefined as
    # written — the intended targets were presumably `parser` and `args`.
    __A : str = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_name''',
        default='''conditional_detr_resnet50''',
        type=str,
        help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
    )
    parser.add_argument(
        '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
    )
    __A : Any = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 141
| 0
|
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
# NOTE(review): `os` is imported but never used and the '3' below is assigned
# to an unused name — before obfuscation this likely set an environment
# variable (e.g. a log-level) rather than a plain module global. Confirm.
snake_case = '''3'''
# Basic interpreter / OS information.
print('''Python version:''', sys.version)
print('''OS platform:''', platform.platform())
print('''OS architecture:''', platform.machine())
# Torch details are optional: report None when torch is not installed.
try:
    import torch
    print('''Torch version:''', torch.__version__)
    print('''Cuda available:''', torch.cuda.is_available())
    print('''Cuda version:''', torch.version.cuda)
    print('''CuDNN version:''', torch.backends.cudnn.version())
    print('''Number of GPUs available:''', torch.cuda.device_count())
except ImportError:
    print('''Torch version:''', None)
# transformers is likewise optional.
try:
    import transformers
    print('''transformers version:''', transformers.__version__)
except ImportError:
    print('''transformers version:''', None)
| 103
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
snake_case = logging.get_logger(__name__)
# NOTE(review): every constant below is bound to the same obfuscated name
# `snake_case`, so each assignment overwrites the previous one (including the
# logger above); these were presumably distinct module globals
# (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, ...) before obfuscation.
# Expected vocabulary/tokenizer file names inside a checkpoint directory.
snake_case = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# Download URLs for the vocab files of the published RoFormer checkpoints.
snake_case = {
    '''vocab_file''': {
        '''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt''',
        '''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt''',
        '''junnyu/roformer_chinese_char_small''': (
            '''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'''
        ),
        '''junnyu/roformer_chinese_char_base''': (
            '''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'''
        ),
        '''junnyu/roformer_small_discriminator''': (
            '''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'''
        ),
        '''junnyu/roformer_small_generator''': (
            '''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'''
        ),
    }
}
# Maximum input lengths (in tokens) supported by each checkpoint.
snake_case = {
    '''junnyu/roformer_chinese_small''': 1_5_3_6,
    '''junnyu/roformer_chinese_base''': 1_5_3_6,
    '''junnyu/roformer_chinese_char_small''': 5_1_2,
    '''junnyu/roformer_chinese_char_base''': 5_1_2,
    '''junnyu/roformer_small_discriminator''': 1_2_8,
    '''junnyu/roformer_small_generator''': 1_2_8,
}
# Default tokenizer-init kwargs per checkpoint.
snake_case = {
    '''junnyu/roformer_chinese_small''': {'''do_lower_case''': True},
    '''junnyu/roformer_chinese_base''': {'''do_lower_case''': True},
    '''junnyu/roformer_chinese_char_small''': {'''do_lower_case''': True},
    '''junnyu/roformer_chinese_char_base''': {'''do_lower_case''': True},
    '''junnyu/roformer_small_discriminator''': {'''do_lower_case''': True},
    '''junnyu/roformer_small_generator''': {'''do_lower_case''': True},
}
class UpperCAmelCase(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by HuggingFace *tokenizers*.

    Uses a Jieba-based custom pre-tokenizer for Chinese text, which is swapped
    out for a plain ``BertPreTokenizer`` while pickling/saving (the custom
    pre-tokenizer is not serializable).

    Fixes in this revision: the ``__init__`` signature reused one parameter
    name for every argument (a SyntaxError), all four public methods were
    bound to the same obfuscated name (so only the last survived), and the
    five class attributes each overwrote a single shared name.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its lowercase/strip_accents state
        # disagrees with the requested options.
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        """Swap in a picklable pre-tokenizer before serialization."""
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        """Restore the Jieba pre-tokenizer after unpickling."""
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]`` input ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Return token-type ids: 0 for the first segment, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Save the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        """Save with a serializable pre-tokenizer (the Jieba one cannot be dumped)."""
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
| 103
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
# NOTE(review): this rebinds the same obfuscated name as the logger above, so
# the logger reference is lost — these were presumably two distinct globals.
# Map of canonical BioGPT checkpoint names to their hosted config files.
lowerCamelCase__ = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowerCAmelCase__(PretrainedConfig):
    """Configuration class for BioGPT models.

    Defaults correspond to the ``microsoft/biogpt`` checkpoint. Fixes in this
    revision: the ``__init__`` signature reused one parameter name for every
    argument (a SyntaxError), the base class was an undefined obfuscated name
    (now ``PretrainedConfig``, imported at the top of this module), and the
    ``model_type`` class attribute had lost its name.
    """

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        # Special-token ids are handled by the PretrainedConfig base class.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding for the MRA model: heavy torch-dependent modules are
# only imported when first accessed through the _LazyModule proxy below.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: expose only the configuration objects.
    pass
else:
    # Previously this list was assigned to a throwaway name and never attached
    # to the import structure, so the modeling symbols were never exported.
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Replace this module with the lazy proxy; passing module_spec keeps
    # importlib metadata intact (consistent with the other model __init__
    # files in this package, e.g. the NllbMoe one).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __a ( A__ : list[list[float]] ):
    """Return the inverse of a 2x2 or 3x3 matrix of floats.

    Internally the arithmetic is carried out with ``decimal.Decimal`` to keep
    the products exact; exact negative zeros are normalised to ``0.0`` in the
    returned matrix.

    Raises:
        ValueError: if the matrix is singular, or if it is not 2x2 / 3x3.
    """
    dec = Decimal

    if len(A__) == 2 and len(A__[0]) == 2 and len(A__[1]) == 2:
        # 2x2 case: closed-form inverse via the adjugate.
        determinant = float(dec(A__[0][0]) * dec(A__[1][1]) - dec(A__[1][0]) * dec(A__[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")
        adjugate = [
            [A__[1][1], -A__[0][1]],
            [-A__[1][0], A__[0][0]],
        ]
        return [[(float(dec(entry)) / determinant) or 0.0 for entry in row] for row in adjugate]

    if len(A__) == 3 and len(A__[0]) == 3 and len(A__[1]) == 3 and len(A__[2]) == 3:
        m = [[dec(entry) for entry in row] for row in A__]
        # Determinant by Sarrus' rule (same grouping as the textbook formula).
        determinant = float(
            (
                (m[0][0] * m[1][1] * m[2][2])
                + (m[0][1] * m[1][2] * m[2][0])
                + (m[0][2] * m[1][0] * m[2][1])
            )
            - (
                (m[0][2] * m[1][1] * m[2][0])
                + (m[0][1] * m[1][0] * m[2][2])
                + (m[0][0] * m[1][2] * m[2][1])
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        def minor(i: int, j: int):
            # Determinant of the 2x2 matrix left after deleting row i, col j.
            sub = [[m[r][c] for c in range(3) if c != j] for r in range(3) if r != i]
            return sub[0][0] * sub[1][1] - sub[0][1] * sub[1][0]

        # Cofactor matrix: signed minors.
        cofactors = [[(-1) ** (i + j) * minor(i, j) for j in range(3)] for i in range(3)]
        # Adjugate (transposed cofactors) divided by the determinant.
        inverse = [[cofactors[j][i] / dec(determinant) for j in range(3)] for i in range(3)]
        return [[float(dec(entry)) or 0.0 for entry in row] for row in inverse]

    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 16
|
'''simple docstring'''
def UpperCAmelCase_ ( lowerCamelCase_ ):
    """Compute the rank of a matrix by Gaussian elimination (mutates the input).

    Args:
        lowerCamelCase_: matrix as a list of equal-length numeric rows.

    Returns:
        The rank (number of linearly independent rows/columns).

    The previous version tried to stay on the same row after a column
    reduction by decrementing the ``for``-loop variable (``row -= 1``), which
    has no effect in Python; this rewrite uses an explicit ``while`` loop so
    the retry actually happens. Local bookkeeping names are also restored
    (they had all been collapsed onto one obfuscated name).
    """
    matrix = lowerCamelCase_
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)
    row = 0
    while row < rank:
        if matrix[row][row] != 0:
            # Eliminate all entries below the pivot in this column.
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
            row += 1
        else:
            # Pivot is zero: try to swap in a lower row with a non-zero entry.
            swapped = False
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    swapped = True
                    break
            if swapped:
                continue  # re-process the same row with the new pivot
            # Column is zero from here down: it cannot contribute to the
            # rank. Shrink the rank and fold the last column into this slot,
            # then retry the same row.
            rank -= 1
            for i in range(rows):
                matrix[i][row] = matrix[i][rank]
    return rank


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 378
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class __snake_case(unittest.TestCase):
    """Tests for ``CLIPProcessor`` round-tripping and tokenizer/image-processor delegation.

    Fixes in this revision: every method was bound to one obfuscated name so
    only the last survived (and unittest discovered no tests, since none began
    with ``test_``), and the random-image dtype referenced the non-existent
    ``np.uinta`` (now ``np.uint8``).
    """

    def setUp(self):
        """Create a temp dir holding a tiny BPE tokenizer vocab and an image-processor config."""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp fixture."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (rust-backed) tokenizer loaded from the temp fixture."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Image processor loaded from the temp fixture."""
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels-first numpy → PIL)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        return [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

    def test_save_load_pretrained_default(self):
        """Saving then reloading preserves both slow and fast tokenizer variants."""
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        """Extra kwargs at from_pretrained time override the saved configuration."""
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        """Processor image path matches calling the image processor directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        """Processor text path matches calling the tokenizer directly."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        """Joint text+image call returns the expected keys; empty call raises."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])
        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        """batch_decode is delegated to the tokenizer."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        """Returned keys agree with processor.model_input_names."""
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 720
|
from ...processing_utils import ProcessorMixin
class __snake_case(ProcessorMixin):
    """Wrap a Whisper feature extractor and tokenizer into a single processor.

    Fixes in this revision: the base class was an undefined one-letter name
    (now ``ProcessorMixin``, imported above), the two ``*_class`` attributes
    had lost their names (breaking ProcessorMixin's attribute wiring), and
    the four delegation methods all shared one obfuscated name so only the
    last one survived.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default target of __call__ outside any "as target processor" context.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Delegate forced decoder-prompt ids to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Route audio to the feature extractor and/or text to the tokenizer.

        Raises:
            ValueError: if neither ``audio`` nor ``text`` is provided.
        """
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            # Positional first argument is treated as the audio input.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Joint call: attach the tokenized text as labels on the features.
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Delegate batch decoding to the tokenizer."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate decoding to the tokenizer."""
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        """Delegate prompt-id construction to the tokenizer."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 169
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    """Print every subsequence of *sequence* using backtracking.

    The previous version bound both functions in this script to the same
    obfuscated name (and the recursive helper had duplicate parameter names,
    a SyntaxError), so every call site referenced an undefined name.
    """
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Recursively explore the include/exclude decision tree and print leaves."""
    if index == len(sequence):
        print(current_subsequence)
        return
    # Branch 1: skip the current element.
    create_state_space_tree(sequence, current_subsequence, index + 1)
    # Branch 2: take the current element, recurse, then backtrack.
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
| 501
|
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """Find a root of *function* in the interval [a, b] by bisection.

    Stops when the half-interval width drops below 1e-7. The previous version
    declared all three parameters with one name (a SyntaxError) and both
    functions in this script shared a single obfuscated name, so the
    ``__main__`` call referenced undefined names.

    Raises:
        ValueError: if f(a) and f(b) have the same non-zero sign (no sign
            change, so bisection cannot bracket a root).
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the endpoints is already a root
        return a
    elif function(b) == 0:
        return b
    elif function(a) * function(b) > 0:
        # Same sign at both ends: no bracketed root for this method.
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precision ~1e-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid  # root lies in [start, mid]
            else:
                start = mid  # root lies in [mid, end]
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    """Sample polynomial with a single real root near x ≈ 2.0946."""
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
| 501
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_lowerCamelCase = logging.get_logger(__name__)
# NOTE(review): this rebinds `_lowerCamelCase`, clobbering the logger created on
# the line above — presumably the dict was meant to have its own name (a
# pretrained-config archive map); confirm and rename before use.
_lowerCamelCase = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __A ( PretrainedConfig ):
    """Configuration class for MEGATRON-BERT models.

    Stores the hyper-parameters used to instantiate the model; the defaults
    below are the literals from the original signature. Fixes the mangled
    version whose base class (``lowerCamelCase__``) was undefined, whose
    ``__init__`` repeated the parameter name ``a__`` (a SyntaxError), and
    whose attribute assignments were all bound to a throwaway local.
    """

    # Model-type identifier used by the Auto* dispatch machinery.
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        """Build the config; shared kwargs (e.g. ``pad_token_id``) go to the base."""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 613
|
import warnings
from .generation import TFGenerationMixin
class __A ( TFGenerationMixin ):
    """Deprecated import shim for `TFGenerationMixin`.

    The real implementation now lives in `.generation`; importing through this
    legacy module emits a FutureWarning. Fixes the mangled version in which
    both the base class and the warning category were the undefined name
    ``lowerCamelCase__``.
    """

    # Emitted once, at class-creation time, when this legacy module is imported.
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 613
| 1
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def UpperCamelCase ( file , sock ) -> None:
    """Exercise `send_file` end to end with both `open` and the socket patched.

    The bottom decorator's mock is the first argument, so `file` patches
    `builtins.open` and `sock` patches `socket.socket`. The original
    assignment targets were mangled to a single name (and the two parameters
    shared one name — a SyntaxError); `conn` and `chunks` are reconstructed
    from the assertions below, which require `conn.recv` to be a Mock.
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    chunks = iter([1, None])  # one payload chunk, then end-of-stream
    conn.recv = Mock(side_effect=lambda *args, **kwargs: next(chunks))
    # ===== invoke =====
    send_file(filename='''mytext.txt''' , testing=True )
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 72
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
_a : Dict = logging.get_logger(__name__)
# NOTE(review): this rebinds `_a`, clobbering the logger created on the line
# above — presumably the map was meant to have its own name (a pretrained-config
# archive map); confirm and rename before use.
_a : Union[str, Any] = {
    """google/vivit-b-16x2-kinetics400""": (
        """https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _UpperCAmelCase ( PretrainedConfig ):
    """Configuration class for ViViT (video vision transformer) models.

    Defaults are the literals from the original signature. Fixes the mangled
    version whose base class (``_A``) was undefined and whose ``__init__``
    repeated the parameter name ``_lowerCAmelCase`` (a SyntaxError).
    """

    # Model-type identifier used by the Auto* dispatch machinery.
    model_type = '''vivit'''

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],  # noqa: B006 — mutable default kept from the original literal; never mutated here
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        """Build the config; unrecognized kwargs are forwarded to the base class."""
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
| 145
| 0
|
import requests

# Append an API key to this query URL to fetch the current top BBC News articles.
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Print the titles of the current top BBC News articles.

    Restores the constant and parameter names the body already references
    (`_NEWS_API`, `bbc_news_api_key`) and the function name the `__main__`
    block calls — all undefined in the mangled version.

    Args:
        bbc_news_api_key: newsapi.org API key appended to the query URL.
    """
    # fetches the latest JSON news feed; each article in the list is a dict
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"""{i}.) {article["title"]}""")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 651
|
from manim import *
class A( UpperCamelCase ):
    """Manim scene animating disk/CPU/GPU offload during large-model inference.

    NOTE(review): the base class `UpperCamelCase` is undefined in this file
    (presumably `Scene` from manim — confirm), and nearly every positional
    argument below was mangled to the undefined name `A_`, while every
    assignment target collapsed to `lowerCamelCase_`. The code is syntactically
    valid but cannot run as-is; comments below record the apparent intent only.
    """

    def a__ ( self : Optional[Any] ) -> List[str]:
        """Build the key/CPU/GPU/disk layout, then animate an input flowing
        through the model while weights hop between CPU and GPU."""
        # --- basic building-block rectangles for memory cells ---
        lowerCamelCase_ = Rectangle(height=0.5 , width=0.5 )
        lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        lowerCamelCase_ = Rectangle(height=0.25 , width=0.25 )
        # --- CPU block: two columns of six cells plus a label ---
        lowerCamelCase_ = [mem.copy() for i in range(6 )]
        lowerCamelCase_ = [mem.copy() for i in range(6 )]
        lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
        lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
        lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
        lowerCamelCase_ = Text('CPU' , font_size=24 )
        lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(A_ )
        # --- GPU block: four cells plus a label ---
        lowerCamelCase_ = [mem.copy() for i in range(4 )]
        lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
        lowerCamelCase_ = Text('GPU' , font_size=24 )
        lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        gpu.move_to([-1, -1, 0] )
        self.add(A_ )
        # --- Model block: six cells plus a label ---
        lowerCamelCase_ = [mem.copy() for i in range(6 )]
        lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
        lowerCamelCase_ = Text('Model' , font_size=24 )
        lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        model.move_to([3, -1.0, 0] )
        self.add(A_ )
        # --- filled copies of the model cells mirrored onto the CPU column ---
        lowerCamelCase_ = []
        lowerCamelCase_ = []
        for i, rect in enumerate(A_ ):
            lowerCamelCase_ = fill.copy().set_fill(A_ , opacity=0.8 )
            target.move_to(A_ )
            model_arr.append(A_ )
            lowerCamelCase_ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.8 )
            cpu_target.move_to(cpu_left_col_base[i] )
            model_cpu_arr.append(A_ )
        self.add(*A_ , *A_ )
        # --- Disk block: two columns of six meta cells plus a label ---
        lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
        lowerCamelCase_ = [meta_mem.copy() for i in range(6 )]
        lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
        lowerCamelCase_ = VGroup(*A_ ).arrange(A_ , buff=0 )
        lowerCamelCase_ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
        lowerCamelCase_ = Text('Disk' , font_size=24 )
        lowerCamelCase_ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
        disk.move_to([-4, -1.25, 0] )
        self.add(A_ , A_ )
        # --- legend/key in the top-left corner ---
        lowerCamelCase_ = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        lowerCamelCase_ = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        self.add(A_ , A_ )
        lowerCamelCase_ = MarkupText(
            f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
        blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
        self.add(A_ )
        # --- narration step 1 and the moving "input" square ---
        lowerCamelCase_ = MarkupText(
            f"""Now watch as an input is passed through the model\nand how the memory is utilized and handled.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(A_ ) )
        lowerCamelCase_ = Square(0.3 )
        input.set_fill(A_ , opacity=1.0 )
        input.set_stroke(width=0.0 )
        input.next_to(model_base[0] , A_ , buff=0.5 )
        self.play(Write(A_ ) )
        input.generate_target()
        input.target.next_to(model_arr[0] , direction=A_ , buff=0.02 )
        self.play(MoveToTarget(A_ ) )
        self.play(FadeOut(A_ ) )
        # --- first hook fires: weights of layer 0 move CPU -> GPU ---
        lowerCamelCase_ = Arrow(start=A_ , end=A_ , color=A_ , buff=0.5 )
        a.next_to(model_arr[0].get_left() , A_ , buff=0.2 )
        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0] )
        lowerCamelCase_ = MarkupText(
            f"""As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.""" , font_size=24 , )
        step_a.move_to([2, 2, 0] )
        self.play(Write(A_ , run_time=3 ) )
        lowerCamelCase_ = {'run_time': 1, 'fade_in': True, 'fade_out': True, 'buff': 0.02}
        self.play(
            Write(A_ ) , Circumscribe(model_arr[0] , color=A_ , **A_ ) , Circumscribe(model_cpu_arr[0] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
        self.play(MoveToTarget(model_cpu_arr[0] ) )
        lowerCamelCase_ = a.copy()
        # --- walk the input across all six layers, swapping weights as it goes ---
        for i in range(6 ):
            a_c.next_to(model_arr[i].get_right() + 0.02 , A_ , buff=0.2 )
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02 )
            lowerCamelCase_ = AnimationGroup(
                FadeOut(A_ , run_time=0.5 ) , MoveToTarget(A_ , run_time=0.5 ) , FadeIn(A_ , run_time=0.5 ) , lag_ratio=0.2 )
            self.play(A_ )
            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
            if i < 5:
                # not the last layer: prefetch layer i+1 onto the GPU
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
                if i >= 1:
                    lowerCamelCase_ = 0.7
                self.play(
                    Circumscribe(model_arr[i] , **A_ ) , Circumscribe(cpu_left_col_base[i] , **A_ ) , Circumscribe(cpu_left_col_base[i + 1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , Circumscribe(model_arr[i + 1] , color=A_ , **A_ ) , )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
            else:
                # last layer: move its weights back and push the input past the model
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
                self.play(
                    Circumscribe(model_arr[-1] , color=A_ , **A_ ) , Circumscribe(cpu_left_col_base[-1] , color=A_ , **A_ ) , Circumscribe(gpu_rect[0] , color=A_ , **A_ ) , )
                self.play(MoveToTarget(model_cpu_arr[i] ) )
        # --- wrap-up narration ---
        lowerCamelCase_ = a_c
        lowerCamelCase_ = a_c.copy()
        input.generate_target()
        input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
        self.play(
            FadeOut(A_ ) , FadeOut(A_ , run_time=0.5 ) , )
        lowerCamelCase_ = MarkupText(f"""Inference on a model too large for GPU memory\nis successfully completed.""" , font_size=24 )
        step_a.move_to([2, 2, 0] )
        self.play(Write(A_ , run_time=3 ) , MoveToTarget(A_ ) )
        self.wait()
| 651
| 1
|
'''simple docstring'''
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
# Constant names restored from the expression `NUM_SHARDS * NUM_ITEMS_PER_SHARD`
# that the mangled body already referenced.
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    """Raised when the per-rank dataset size differs from the expected one."""


def gen(shards):
    """Yield NUM_ITEMS_PER_SHARD examples (dicts with `i` and `shard`) per shard name."""
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    """Check `split_dataset_by_node` sharding under torch.distributed.

    Reads RANK/WORLD_SIZE from the environment, builds a 4-shard dataset,
    splits it by node, and verifies the local DataLoader yields exactly the
    expected number of rows. Raises FailedTestError on mismatch.
    """
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers

    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        # Materialize to a map-style Dataset when not streaming.
        ds = Dataset.from_list(list(ds))

    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)

    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    # Ranks below the remainder get one extra example.
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))

    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")


if __name__ == "__main__":
    main()
| 533
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public symbols it provides.
# Restored from the mangled version, which bound every structure to one name
# and left `_import_structure` (referenced by the final `_LazyModule` call)
# undefined.
_import_structure = {
    "configuration_mobilebert": [
        "MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "MobileBertConfig",
        "MobileBertOnnxConfig",
    ],
    "tokenization_mobilebert": ["MobileBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mobilebert_fast"] = ["MobileBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mobilebert"] = [
        "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileBertForMaskedLM",
        "MobileBertForMultipleChoice",
        "MobileBertForNextSentencePrediction",
        "MobileBertForPreTraining",
        "MobileBertForQuestionAnswering",
        "MobileBertForSequenceClassification",
        "MobileBertForTokenClassification",
        "MobileBertLayer",
        "MobileBertModel",
        "MobileBertPreTrainedModel",
        "load_tf_weights_in_mobilebert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mobilebert"] = [
        "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileBertForMaskedLM",
        "TFMobileBertForMultipleChoice",
        "TFMobileBertForNextSentencePrediction",
        "TFMobileBertForPreTraining",
        "TFMobileBertForQuestionAnswering",
        "TFMobileBertForSequenceClassification",
        "TFMobileBertForTokenClassification",
        "TFMobileBertMainLayer",
        "TFMobileBertModel",
        "TFMobileBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the module is lazy.
    from .configuration_mobilebert import (
        MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        MobileBertConfig,
        MobileBertOnnxConfig,
    )
    from .tokenization_mobilebert import MobileBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mobilebert_fast import MobileBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mobilebert import (
            MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
            MobileBertLayer,
            MobileBertModel,
            MobileBertPreTrainedModel,
            load_tf_weights_in_mobilebert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mobilebert import (
            TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFMobileBertForMaskedLM,
            TFMobileBertForMultipleChoice,
            TFMobileBertForNextSentencePrediction,
            TFMobileBertForPreTraining,
            TFMobileBertForQuestionAnswering,
            TFMobileBertForSequenceClassification,
            TFMobileBertForTokenClassification,
            TFMobileBertMainLayer,
            TFMobileBertModel,
            TFMobileBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 533
| 1
|
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
__snake_case =logging.get_logger(__name__)
# Backward-compatible alias: the conversation classes in this module log
# through the conventional module-level name `logger`, which the mangled
# version left undefined.
logger = __snake_case
class UpperCAmelCase_ :
    """Utility class holding a conversation and its generation history.

    Tracks the unprocessed user input (`new_user_input`), the processed user
    inputs (`past_user_inputs`), and the model replies (`generated_responses`),
    keyed by a UUID. Fixes the mangled version: duplicate `__init__` parameter
    names (a SyntaxError), `uuid.uuida()` instead of `uuid.uuid4()`, instance
    attributes bound to a throwaway local, and four methods sharing one name
    while `__repr__` called the then-undefined `iter_texts`.
    """

    def __init__(self, text: str = None, conversation_id=None, past_user_inputs=None, generated_responses=None) -> None:
        # A fresh UUID identifies the conversation when none is supplied.
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other) -> bool:
        """Equal when UUIDs match, or when the full text history matches."""
        if not isinstance(other, UpperCAmelCase_):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False) -> None:
        """Queue *text* as the next user input; warn if one is already pending."""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self) -> None:
        """Move the pending user input into the processed history."""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str) -> None:
        """Record a model reply."""
        self.generated_responses.append(response)

    def iter_texts(self):
        """Yield `(is_user, text)` pairs in chronological order, pending input last."""
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    SCREAMING_SNAKE_CASE_ , r'''\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ''' , )
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
    """Multi-turn conversational pipeline.

    NOTE(review): the base class / first decorator argument
    `SCREAMING_SNAKE_CASE_` is undefined in this file — presumably `Pipeline`
    and `PIPELINE_INIT_ARGS` from `.base`; confirm. Throughout this class the
    assignment targets were mangled to the single local `lowerCAmelCase` and
    most parameter lists repeat one name (a SyntaxError); the comments below
    record apparent intent only, reconstructed from the names the bodies read.
    """

    def __init__( self : Optional[Any] , *UpperCAmelCase__ : str , **UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
        # Forward construction to the base pipeline, then ensure a pad token exists.
        super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
        if self.tokenizer.pad_token_id is None:
            # NOTE(review): result bound to a local instead of `self.tokenizer.pad_token`.
            lowerCAmelCase = self.tokenizer.eos_token

    def __UpperCAmelCase ( self : int , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Any=None , UpperCAmelCase__ : List[Any]=None , **UpperCAmelCase__ : Union[str, Any] ) -> List[str]:
        # Apparent `_sanitize_parameters`: split kwargs into the three per-stage dicts.
        lowerCAmelCase = {}
        lowerCAmelCase = {}
        lowerCAmelCase = {}
        if min_length_for_response is not None:
            lowerCAmelCase = min_length_for_response
        if minimum_tokens is not None:
            lowerCAmelCase = minimum_tokens
        if "max_length" in generate_kwargs:
            lowerCAmelCase = generate_kwargs['''max_length''']
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            lowerCAmelCase = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(UpperCamelCase__ )
        return preprocess_params, forward_params, postprocess_params

    def __call__( self : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : Tuple=0 , **UpperCAmelCase__ : Union[str, Any] ) -> int:
        # Run the base pipeline; unwrap a single-element result list.
        lowerCAmelCase = super().__call__(UpperCamelCase__ , num_workers=UpperCamelCase__ , **UpperCamelCase__ )
        if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) == 1:
            return outputs[0]
        return outputs

    def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=3_2 ) -> Optional[Any]:
        # Apparent `preprocess`: tokenize the pending conversation turn.
        if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
            raise ValueError('ConversationalPipeline, expects Conversation as inputs' )
        if conversation.new_user_input is None:
            raise ValueError(
                F'''Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. '''
                'Add user inputs with the conversation\'s `add_user_input` method' )
        if hasattr(self.tokenizer , '_build_conversation_input_ids' ):
            lowerCAmelCase = self.tokenizer._build_conversation_input_ids(UpperCamelCase__ )
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            lowerCAmelCase = self._legacy_parse_and_tokenize(UpperCamelCase__ )
        if self.framework == "pt":
            lowerCAmelCase = torch.LongTensor([input_ids] )
        elif self.framework == "tf":
            lowerCAmelCase = tf.constant([input_ids] )
        return {"input_ids": input_ids, "conversation": conversation}

    def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Dict=1_0 , **UpperCAmelCase__ : Optional[int] ) -> int:
        # Apparent `_forward`: trim over-long inputs, then call `model.generate`.
        lowerCAmelCase = generate_kwargs.get('max_length' , self.model.config.max_length )
        lowerCAmelCase = model_inputs['''input_ids'''].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(F'''Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})''' )
            lowerCAmelCase = max_length - minimum_tokens
            lowerCAmelCase = model_inputs['''input_ids'''][:, -trim:]
            if "attention_mask" in model_inputs:
                lowerCAmelCase = model_inputs['''attention_mask'''][:, -trim:]
        lowerCAmelCase = model_inputs.pop('conversation' )
        lowerCAmelCase = max_length
        lowerCAmelCase = self.model.generate(**UpperCamelCase__ , **UpperCamelCase__ )
        if self.model.config.is_encoder_decoder:
            # Encoder-decoder outputs start fresh; decoder-only outputs echo the prompt.
            lowerCAmelCase = 1
        else:
            lowerCAmelCase = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str]=True ) -> int:
        # Apparent `postprocess`: decode the reply and append it to the conversation.
        lowerCAmelCase = model_outputs['''output_ids''']
        lowerCAmelCase = self.tokenizer.decode(
            output_ids[0] , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ , )
        lowerCAmelCase = model_outputs['''conversation''']
        conversation.mark_processed()
        conversation.append_response(UpperCamelCase__ )
        return conversation

    def __UpperCAmelCase ( self : str , UpperCAmelCase__ : Union[str, Any] ) -> List[Any]:
        # Apparent `_legacy_parse_and_tokenize`: encode each turn, EOS-separated,
        # then keep only the last `model_max_length` tokens.
        lowerCAmelCase = self.tokenizer.eos_token_id
        lowerCAmelCase = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) + [eos_token_id] )
            else:
                input_ids.extend(self.tokenizer.encode(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ) )
        if len(UpperCamelCase__ ) > self.tokenizer.model_max_length:
            lowerCAmelCase = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 711
|
'''simple docstring'''
import warnings


# Backward-compatibility shim: the implementation moved to `accelerate.utils.memory`.
# Fixes the warning text, which misspelled the symbol as `find_executable_batchsize`
# while telling users to import `find_executable_batch_size`.
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main `__init__`: "
    "`from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
| 513
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = """▁"""
UpperCAmelCase = {"""vocab_file""": """sentencepiece.bpe.model"""}
UpperCAmelCase = {
"""vocab_file""": {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
}
}
UpperCAmelCase = {
"""facebook/xglm-564M""": 2048,
}
class lowercase__ ( A_ ):
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="</s>" , SCREAMING_SNAKE_CASE="<s>" , SCREAMING_SNAKE_CASE="<unk>" , SCREAMING_SNAKE_CASE="<pad>" , SCREAMING_SNAKE_CASE = None , **SCREAMING_SNAKE_CASE , ) -> None:
_lowerCamelCase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_lowerCamelCase : List[str] = 7
_lowerCamelCase : List[str] = [F'<madeupword{i}>' for i in range(self.num_madeup_words)]
_lowerCamelCase : Union[str, Any] = kwargs.get("""additional_special_tokens""" , [])
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , sep_token=SCREAMING_SNAKE_CASE , cls_token=SCREAMING_SNAKE_CASE , pad_token=SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE , )
_lowerCamelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(SCREAMING_SNAKE_CASE))
_lowerCamelCase : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCamelCase : List[Any] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCamelCase : List[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
_lowerCamelCase : str = len(self.sp_model)
_lowerCamelCase : List[Any] = {F'<madeupword{i}>': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
self.fairseq_tokens_to_ids.update(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self) -> Optional[Any]:
_lowerCamelCase : str = self.__dict__.copy()
_lowerCamelCase : Any = None
_lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , SCREAMING_SNAKE_CASE) -> List[str]:
_lowerCamelCase : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs"""):
_lowerCamelCase : Optional[int] = {}
_lowerCamelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]:
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_lowerCamelCase : List[Any] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None , SCREAMING_SNAKE_CASE = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE , token_ids_a=SCREAMING_SNAKE_CASE , already_has_special_tokens=SCREAMING_SNAKE_CASE)
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE))
return [1] + ([0] * len(SCREAMING_SNAKE_CASE)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> List[int]:
_lowerCamelCase : Optional[int] = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a) * [0]
@property
def UpperCamelCase_ ( self) -> List[str]:
return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Dict = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[str]:
return self.sp_model.encode(SCREAMING_SNAKE_CASE , out_type=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> Optional[int]:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCamelCase : List[Any] = self.sp_model.PieceToId(SCREAMING_SNAKE_CASE)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> int:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> List[Any]:
_lowerCamelCase : Any = """""".join(SCREAMING_SNAKE_CASE).replace(SCREAMING_SNAKE_CASE , """ """).strip()
return out_string
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE):
logger.error(F'Vocabulary path ({save_directory}) should be a directory')
return
_lowerCamelCase : Tuple = os.path.join(
SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , SCREAMING_SNAKE_CASE)
elif not os.path.isfile(self.vocab_file):
with open(SCREAMING_SNAKE_CASE , """wb""") as fi:
_lowerCamelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(SCREAMING_SNAKE_CASE)
return (out_vocab_file,)
| 88
|
'''simple docstring'''
from __future__ import annotations
from math import pi
def UpperCamelCase ( inductance , frequency , reactance ) -> dict[str, float]:
    """Solve for the one zero-valued quantity among inductance (H),
    frequency (Hz) and inductive reactance (ohms): X_L = 2*pi*f*L.

    Restores the parameter names the body already references; the mangled
    signature repeated `a` three times, which is a SyntaxError in Python.

    Returns:
        A one-entry dict naming the solved quantity.

    Raises:
        ValueError: if not exactly one argument is 0, or any argument is negative.
    """
    if (inductance, frequency, reactance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if inductance < 0:
        raise ValueError('''Inductance cannot be negative''' )
    if frequency < 0:
        raise ValueError('''Frequency cannot be negative''' )
    if reactance < 0:
        raise ValueError('''Inductive reactance cannot be negative''' )
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError('''Exactly one argument must be 0''' )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 432
| 0
|
def manhattan_distance(point_a, point_b) -> float:
    """Return the Manhattan (L1) distance between two n-dimensional points.

    Restores distinct names: the mangled version defined all three functions
    as `lowerCamelCase__` (so they shadowed one another) while calling the
    then-undefined `_validate_point`, and repeated parameter names.
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point) -> None:
    """Raise TypeError/ValueError unless *point* is a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a, point_b) -> float:
    """Same contract as `manhattan_distance`, expression-style body."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 139
|
import base64


def base85_encode(string: str) -> bytes:
    """Base85-encode *string* (UTF-8) with the RFC 1924 alphabet.

    Fixes the mangled version, which imported the nonexistent module
    `baseaa` and left the names called in the `__main__` block undefined.
    """
    # encoded the input (we need bytes, so we encode the str first)
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(a85encoded: bytes) -> str:
    """Decode base85-encoded bytes back to a UTF-8 string."""
    return base64.b85decode(a85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
| 139
| 1
|
"""simple docstring"""
import operator as op
__A = '''scaler.pt'''
__A = '''pytorch_model'''
__A = '''random_states'''
__A = '''optimizer'''
__A = '''scheduler'''
__A = '''pytorch_model.bin'''
__A = '''pytorch_model.bin.index.json'''
__A = '''model.safetensors'''
__A = '''model.safetensors.index.json'''
__A = '''1.10.2'''
__A = '''py38'''
__A = '''4.17.0'''
__A = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
__A = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
__A = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
__A = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
__A = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
__A = '''2.0.1'''
__A = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
__A = ['''default''', '''reduce-overhead''', '''max-autotune''']
__A = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__A = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
__A = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
__A = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 646
|
"""simple docstring"""
from manim import *
class _snake_case ( a__ ):
    """Manim scene animating how an empty model skeleton is loaded into CPU
    memory (visualised as rectangles) next to a GPU and a Model group.

    NOTE(review): identifier mangling has broken this scene — the base class
    ``a__`` is undefined (presumably ``Scene``), and ``UpperCAmelCase`` is
    used as the argument for arrange directions, colours, targets, alignment
    flags, and animation lists alike, but is never defined, so construction
    raises NameError.  ``cpu_left_col_base``/``cpu_targs``/``first_animations``/
    ``second_animations`` are also read but never bound (each list was
    assigned to the mangled name ``__lowerCamelCase``).  Needs reconstruction.
    """
    def lowerCamelCase__ ( self : str ):
        """Build the CPU/GPU/Model layout and play the loading animation."""
        # Base memory-cell rectangles: outer frame and slightly smaller fill.
        __lowerCamelCase : Tuple = Rectangle(height=0.5 , width=0.5 )
        __lowerCamelCase : Dict = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
        # Two columns of six cells form the CPU block.
        __lowerCamelCase : str = [mem.copy() for i in range(6 )]
        __lowerCamelCase : str = [mem.copy() for i in range(6 )]
        __lowerCamelCase : Union[str, Any] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : List[str] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : Dict = VGroup(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : str = Text("CPU" , font_size=24 )
        __lowerCamelCase : List[Any] = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCAmelCase )
        # Single-cell GPU block, offset left of its aligned position.
        __lowerCamelCase : Tuple = [mem.copy() for i in range(1 )]
        __lowerCamelCase : List[str] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : Optional[Any] = Text("GPU" , font_size=24 )
        __lowerCamelCase : Any = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        gpu.align_to(UpperCAmelCase , UpperCAmelCase )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(UpperCAmelCase )
        # Six-cell "Model" block on the right.
        __lowerCamelCase : List[Any] = [mem.copy() for i in range(6 )]
        __lowerCamelCase : Optional[int] = VGroup(*UpperCAmelCase ).arrange(UpperCAmelCase , buff=0 )
        __lowerCamelCase : List[str] = Text("Model" , font_size=24 )
        __lowerCamelCase : Tuple = Group(UpperCAmelCase , UpperCAmelCase ).arrange(UpperCAmelCase , buff=0.5 , aligned_edge=UpperCAmelCase )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , Create(UpperCAmelCase , run_time=1 ) , )
        # Caption and legend key.
        __lowerCamelCase : int = MarkupText(
            F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""" , font_size=24 , )
        __lowerCamelCase : Dict = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        __lowerCamelCase : str = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCAmelCase , run_time=2.5 ) , Write(UpperCAmelCase ) , Write(UpperCAmelCase ) )
        self.add(UpperCAmelCase )
        # Animate each model cell fading its stroke while a filled copy moves
        # onto the corresponding CPU cell target.
        __lowerCamelCase : Any = []
        __lowerCamelCase : int = []
        __lowerCamelCase : Optional[Any] = []
        for i, rect in enumerate(UpperCAmelCase ):
            __lowerCamelCase : Union[str, Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(UpperCAmelCase , opacity=0.7 )
            cpu_target.move_to(UpperCAmelCase )
            cpu_target.generate_target()
            __lowerCamelCase : Optional[Any] = 0.4_6 / 4
            __lowerCamelCase : Dict = 0.4_6 / 3
            if i == 0:
                # First target anchors to the bottom-left CPU cell.
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=UpperCAmelCase )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                # Fourth target starts a new row next to the first.
                cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCAmelCase , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCAmelCase , buff=0.0 )
            cpu_targs.append(UpperCAmelCase )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCAmelCase ) )
            second_animations.append(MoveToTarget(UpperCAmelCase , run_time=1.5 ) )
        self.play(*UpperCAmelCase )
        self.play(*UpperCAmelCase )
        self.wait()
| 646
| 1
|
from ..utils import DummyObject, requires_backends
class __magic_name__ ( metaclass=lowercase_ ):
    """Dummy placeholder that raises an informative error when the ``torch``
    and ``scipy`` backends are not installed.

    NOTE(review): the metaclass ``lowercase_`` is not defined in this chunk
    (presumably a mangled ``DummyObject``), each method declares ``*a__`` and
    ``**a__`` with the same name — a SyntaxError in Python — and both
    classmethods share the name ``_UpperCAmelCase`` (upstream these are
    ``from_config`` / ``from_pretrained``).  Needs reconstruction.
    """
    _UpperCamelCase = ["torch", "scipy"]
    def __init__( self , *a__ , **a__ ):
        # Instantiation attempts report the missing backends.
        requires_backends(self , ['''torch''', '''scipy'''] )
    @classmethod
    def _UpperCAmelCase ( cls , *a__ , **a__ ):
        requires_backends(cls , ['''torch''', '''scipy'''] )
    @classmethod
    def _UpperCAmelCase ( cls , *a__ , **a__ ):
        requires_backends(cls , ['''torch''', '''scipy'''] )
| 297
|
import math
import qiskit
def _lowerCamelCase ( input_a = 1 , input_b = 1 , carry_in = 1 ):
    """Simulate a quantum full adder on qubit inputs.

    Each input is 0 or 1 (or 2 to request a Hadamard superposition).  Builds a
    4-qubit circuit (two inputs, carry-in, ancilla), applies the standard
    Toffoli/CNOT full-adder network, measures sum and carry-out, and returns
    the measurement counts from 1000 shots on the Aer simulator.

    BUG FIXES vs. the original:
    - the signature declared the same parameter name three times (SyntaxError);
    - the type guard called ``isinstance(x, x)`` instead of checking ``str``;
    - every local was bound to one mangled name while the body read the
      undefined names ``entry``/``quantum_circuit``.

    Raises:
        TypeError: if any input is a string.
        ValueError: if any input is negative, non-integral, or greater than 2.
    """
    if (
        isinstance(input_a , str )
        or isinstance(input_b , str )
        or isinstance(carry_in , str )
    ):
        raise TypeError('''inputs must be integers.''' )
    if (input_a < 0) or (input_b < 0) or (carry_in < 0):
        raise ValueError('''inputs must be positive.''' )
    if (
        (math.floor(input_a ) != input_a)
        or (math.floor(input_b ) != input_b)
        or (math.floor(carry_in ) != carry_in)
    ):
        raise ValueError('''inputs must be exact integers.''' )
    if (input_a > 2) or (input_b > 2) or (carry_in > 2):
        raise ValueError('''inputs must be less or equal to 2.''' )
    # build registers
    qr = qiskit.QuantumRegister(4 , '''qr''' )
    cr = qiskit.ClassicalRegister(2 , '''cr''' )
    # list the entries
    entry = [input_a, input_b, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr , cr )
    for i in range(0 , 3 ):
        if entry[i] == 2:
            quantum_circuit.h(i )  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i )  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i )  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0 , 1 , 3 )  # ccx = toffoli gate
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.ccx(1 , 2 , 3 )
    quantum_circuit.cx(1 , 2 )
    quantum_circuit.cx(0 , 1 )
    quantum_circuit.measure([2, 3] , cr )  # measure the last two qbits
    backend = qiskit.Aer.get_backend('''aer_simulator''' )
    job = qiskit.execute(quantum_circuit , backend , shots=1_0_0_0 )
    return job.result().get_counts(quantum_circuit )
if __name__ == "__main__":
    # BUG FIX: the demo called the undefined name ``quantum_full_adder`` —
    # the adder above is (mangled to) ``_lowerCamelCase``.
    print(F'Total sum count for state is: {_lowerCamelCase(1, 1, 1)}')
| 297
| 1
|
def UpperCamelCase_( __magic_name__ : int = 4000000 ):
    """Project Euler 2: sum of even-valued Fibonacci terms not exceeding
    ``__magic_name__``.

    BUG FIX: the original bound every local to one mangled name
    (``_lowerCAmelCase``) while the body read the undefined names
    ``fib``/``i``/``n``/``total``; this restores the intended locals, with the
    limit taken from the parameter.
    """
    fib = [0, 1]
    i = 0
    # Grow the sequence until the next appended term would exceed the limit.
    while fib[i] <= __magic_name__:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > __magic_name__:
            break
        i += 1
    total = 0
    # The last element may exceed the limit, hence ``len(fib) - 1``.
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
    # BUG FIX: the original printed ``solution()`` which is undefined here —
    # the solver above is (mangled to) ``UpperCamelCase_``.
    print(F'''{UpperCamelCase_() = }''')
| 687
|
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ (unittest.TestCase ):
    """Unit tests for the summarization data-processing helpers
    (``truncate_or_pad``, ``process_story``, ``build_mask``,
    ``compute_token_type_ids``).

    NOTE(review): identifier mangling has broken this suite — every method is
    named ``SCREAMING_SNAKE_CASE__`` (so only the last definition survives and
    none are discovered as ``test_*`` methods; the first was presumably
    ``setUp``), locals are all bound to ``_lowerCAmelCase``, and assertions
    read the undefined name ``_UpperCAmelCase``.  Running any method raises
    NameError; the suite needs reconstruction with distinct method/local names.
    """
    def SCREAMING_SNAKE_CASE__ ( self: int ):
        # Intended as setUp: fixture block size (note: binds a local, not
        # ``self.block_size`` which later methods read — mangled).
        _lowerCAmelCase :Optional[int] = 10
    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        # Short sequence should be zero-padded up to block_size.
        _lowerCAmelCase :str = [1, 2, 3, 4]
        _lowerCAmelCase :Union[str, Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: int ):
        # Sequence exactly block_size long is returned unchanged.
        _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        _lowerCAmelCase :List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        # Overlong sequence is truncated down to block_size.
        _lowerCAmelCase :Dict = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        _lowerCAmelCase :Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(_UpperCAmelCase , self.block_size , 0 ) , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: List[str] ):
        # A story without "@highlight" sections yields no summary lines.
        _lowerCAmelCase :List[str] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _lowerCAmelCase , _lowerCAmelCase :Optional[Any] = process_story(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , [] )
    def SCREAMING_SNAKE_CASE__ ( self: Any ):
        # An empty story yields empty story and summary lists.
        _lowerCAmelCase :Optional[int] = ''
        _lowerCAmelCase , _lowerCAmelCase :str = process_story(_UpperCAmelCase )
        self.assertEqual(_UpperCAmelCase , [] )
        self.assertEqual(_UpperCAmelCase , [] )
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        # Content after "@highlight" becomes the summary.
        _lowerCAmelCase :Optional[Any] = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        _lowerCAmelCase , _lowerCAmelCase :Optional[int] = process_story(_UpperCAmelCase )
        _lowerCAmelCase :Optional[Any] = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
        _lowerCAmelCase :Optional[int] = ['It was the best of times.']
        self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
    def SCREAMING_SNAKE_CASE__ ( self: Tuple ):
        # No padding token present: mask is all ones.
        _lowerCAmelCase :Union[str, Any] = torch.tensor([1, 2, 3, 4] )
        _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 0 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE__ ( self: Optional[int] ):
        # Padding token 23 masks out the trailing entries.
        _lowerCAmelCase :List[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        _lowerCAmelCase :Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 23 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE__ ( self: Optional[Any] ):
        # Padding token 1 masks trailing 1s even though 1 appears elsewhere.
        _lowerCAmelCase :Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        _lowerCAmelCase :List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(_UpperCAmelCase , 1 ).numpy() , expected.numpy() )
    def SCREAMING_SNAKE_CASE__ ( self: str ):
        # Separator token 101 flips the segment id after each occurrence.
        _lowerCAmelCase :List[str] = 101
        _lowerCAmelCase :Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
        _lowerCAmelCase :int = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        _lowerCAmelCase :List[str] = compute_token_type_ids(_UpperCAmelCase , _UpperCAmelCase )
        np.testing.assert_array_equal(_UpperCAmelCase , _UpperCAmelCase )
| 687
| 1
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase__ ( UpperCamelCase_ ):
    """Text-guided inpainting pipeline: a CLIPSeg segmentation model turns a
    text prompt into a mask, then a Stable Diffusion inpainting pipeline fills
    the masked region.

    NOTE(review): identifier mangling has broken this class — ``__init__`` and
    ``__call__`` declare the parameter name ``__A`` many times (a SyntaxError
    in Python), and method bodies read ``_a`` (and names like ``scheduler``/
    ``safety_checker``) that the mangled signatures never bind.  The structure
    is annotated as-is; it needs reconstruction before it can run.
    """
    def __init__( self : Any , __A : Dict , __A : int , __A : Union[str, Any] , __A : List[str] , __A : Any , __A : int , __A : List[str] , __A : List[Any] , __A : Tuple , ) -> List[Any]:
        """Validate/migrate legacy scheduler configs, warn when the safety
        checker is disabled, and register all submodules."""
        super().__init__()
        # Legacy schedulers used steps_offset == 0; deprecate and migrate to 1.
        if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
            lowerCAmelCase__ = (
                f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
                f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                """to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
                """ in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
                """ it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
                """ file"""
            )
            deprecate("""steps_offset!=1""" , """1.0.0""" , _a , standard_warn=_a )
            lowerCAmelCase__ = dict(scheduler.config )
            lowerCAmelCase__ = 1
            lowerCAmelCase__ = FrozenDict(_a )
        # Legacy PNDM configs must skip PRK steps; deprecate and force True.
        if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
            lowerCAmelCase__ = (
                f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
                """ `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
                """ sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
                """ incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
                """ Hub, it would be very nice if you could open a Pull request for the"""
                """ `scheduler/scheduler_config.json` file"""
            )
            deprecate("""skip_prk_steps not set""" , """1.0.0""" , _a , standard_warn=_a )
            lowerCAmelCase__ = dict(scheduler.config )
            lowerCAmelCase__ = True
            lowerCAmelCase__ = FrozenDict(_a )
        if safety_checker is None:
            logger.warning(
                f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
                """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
                """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
                """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
                """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
    def lowercase__ ( self : Any , __A : Optional[Any] = "auto" ) -> Optional[int]:
        """Enable sliced attention; "auto" halves the attention head dim."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowerCAmelCase__ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_a )
    def lowercase__ ( self : Optional[Any] ) -> List[Any]:
        """Disable attention slicing (delegates with a None-like argument)."""
        self.enable_attention_slicing(_a )
    def lowercase__ ( self : Optional[int] ) -> Optional[Any]:
        """Offload the large submodules to CPU via accelerate to save VRAM."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        lowerCAmelCase__ = torch.device("""cuda""" )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(_a , _a )
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def lowercase__ ( self : Tuple ) -> Optional[int]:
        """Return the device modules actually execute on (accounts for
        accelerate hooks placing weights on 'meta')."""
        if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_a , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    def __call__( self : str , __A : Optional[Any] , __A : Any , __A : int , __A : List[str] = 512 , __A : int = 512 , __A : Tuple = 50 , __A : int = 7.5 , __A : List[str] = None , __A : List[str] = 1 , __A : Dict = 0.0 , __A : Any = None , __A : Any = None , __A : Optional[int] = "pil" , __A : Optional[int] = True , __A : List[str] = None , __A : Any = 1 , **__A : Union[str, Any] , ) -> List[str]:
        """Segment the image with the text prompt, then run inpainting on the
        resulting mask and return the inpainting pipeline's output."""
        lowerCAmelCase__ = self.segmentation_processor(
            text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
        lowerCAmelCase__ = self.segmentation_model(**_a )
        # Sigmoid over logits -> soft mask, converted to a PIL image at the
        # original image size.
        lowerCAmelCase__ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        lowerCAmelCase__ = self.numpy_to_pil(_a )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        lowerCAmelCase__ = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 712
|
'''simple docstring'''
def _lowerCAmelCase( UpperCAmelCase_ : str ) -> int:
assert column_title.isupper()
lowerCAmelCase__ = 0
lowerCAmelCase__ = len(UpperCAmelCase_ ) - 1
lowerCAmelCase__ = 0
while index >= 0:
lowerCAmelCase__ = (ord(column_title[index] ) - 64) * pow(26 , UpperCAmelCase_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 211
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _lowerCAmelCase ( a ):
    """Output container for the semantic Stable Diffusion pipeline: generated
    images plus per-image NSFW flags.

    NOTE(review): both fields below are annotated under the same mangled name
    ``__magic_name__`` (the second annotation overrides the first — upstream
    these are ``images`` and ``nsfw_content_detected``), and the base class
    ``a`` is undefined in this chunk (presumably ``BaseOutput``).
    """
    __magic_name__ :Union[List[PIL.Image.Image], np.ndarray]
    __magic_name__ :Optional[List[bool]]
# The pipeline itself needs both transformers and torch at import time.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 93
|
import pytest
_lowerCamelCase ="""__dummy_dataset1__"""
_lowerCamelCase ="""
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def _a ( ):
    # NOTE(review): ``DATASET_LOADING_SCRIPT_NAME`` is undefined here — the
    # constant above was mangled to ``_lowerCamelCase``; confirm upstream name.
    return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _a ( ):
    # NOTE(review): same issue (``DATASET_LOADING_SCRIPT_CODE`` undefined).
    # Also, all three fixtures share the name ``_a``, so only the last
    # definition survives at module level.
    return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _a ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
    # Writes the dummy script into tmp_path/datasets/<name>.py and returns it.
    # NOTE(review): duplicate parameter names are a SyntaxError — upstream the
    # parameters are the two fixtures above plus ``tmp_path``; the body also
    # reads the undefined names ``dataset_loading_script_name``/``tmp_path``/
    # ``script_name``/``script_dir``.  Needs reconstruction.
    lowerCamelCase : Union[str, Any] = dataset_loading_script_name
    lowerCamelCase : Dict = tmp_path / """datasets""" / script_name
    script_dir.mkdir(parents=lowerCamelCase )
    lowerCamelCase : str = script_dir / F'''{script_name}.py'''
    with open(lowerCamelCase, """w""" ) as f:
        f.write(lowerCamelCase )
    return str(lowerCamelCase )
| 681
| 0
|
def SCREAMING_SNAKE_CASE_ ( numerator: int = 3 , denominator: int = 7 , limit: int = 100_0000 ):
    """Project Euler 71: return the numerator of the largest reduced proper
    fraction immediately to the left of ``numerator/denominator`` among
    fractions with denominators up to ``limit``.

    BUG FIX: the original declared ``__lowerCamelCase`` three times in the
    signature (a SyntaxError) and the body read undefined names; the parameter
    names are restored from the keyword call in the ``__main__`` guard.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        # Largest numerator with current_numerator/current_denominator <= target.
        current_numerator = current_denominator * numerator // denominator
        # Step below the target when the fraction would equal it exactly.
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Compare fractions by cross-multiplication to avoid float error.
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
    # BUG FIX: ``solution`` is undefined in this module — call the solver above.
    print(SCREAMING_SNAKE_CASE_(numerator=3, denominator=7, limit=1_0_0_0_0_0_0))
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure for the Swin V2 model.
# NOTE(review): mangling broke the standard pattern — the dict below and the
# torch-only symbol list are both bound to ``SCREAMING_SNAKE_CASE__`` (the
# list should be ``_import_structure["modeling_swinv2"] = [...]``), so the
# ``_import_structure`` name read at the bottom is undefined (NameError at
# import).  The TYPE_CHECKING imports also use mangled module/class names
# (``configuration_swinva`` / ``SwinvaConfig``) that do not match the strings
# listed here.  Needs reconstruction against the upstream template.
SCREAMING_SNAKE_CASE__ = {
    """configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE__ = [
        """SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """Swinv2ForImageClassification""",
        """Swinv2ForMaskedImageModeling""",
        """Swinv2Model""",
        """Swinv2PreTrainedModel""",
    ]
if TYPE_CHECKING:
    from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinva import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinvaForImageClassification,
            SwinvaForMaskedImageModeling,
            SwinvaModel,
            SwinvaPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy loader at runtime.
    SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 601
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the Blenderbot model (config/tokenizer always,
# fast tokenizer / torch / tf / flax symbols guarded by availability checks).
# NOTE(review): mangling broke the standard pattern — every backend symbol
# list rebinds ``lowercase_`` instead of adding a key to the structure dict
# (upstream: ``_import_structure["..."] = [...]``), so the ``_import_structure``
# name read at the bottom is undefined (NameError at import).  Needs
# reconstruction against the upstream template.
lowercase_ = {
    'configuration_blenderbot': [
        'BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BlenderbotConfig',
        'BlenderbotOnnxConfig',
    ],
    'tokenization_blenderbot': ['BlenderbotTokenizer'],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = ['BlenderbotTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        'BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BlenderbotForCausalLM',
        'BlenderbotForConditionalGeneration',
        'BlenderbotModel',
        'BlenderbotPreTrainedModel',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        'TFBlenderbotForConditionalGeneration',
        'TFBlenderbotModel',
        'TFBlenderbotPreTrainedModel',
    ]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        'FlaxBlenderbotForConditionalGeneration',
        'FlaxBlenderbotModel',
        'FlaxBlenderbotPreTrainedModel',
    ]
# Static-analysis path: perform the real imports for type checkers only.
if TYPE_CHECKING:
    from .configuration_blenderbot import (
        BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BlenderbotConfig,
        BlenderbotOnnxConfig,
    )
    from .tokenization_blenderbot import BlenderbotTokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blenderbot import (
            BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlenderbotForCausalLM,
            BlenderbotForConditionalGeneration,
            BlenderbotModel,
            BlenderbotPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blenderbot import (
            TFBlenderbotForConditionalGeneration,
            TFBlenderbotModel,
            TFBlenderbotPreTrainedModel,
        )
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_blenderbot import (
            FlaxBlenderbotForConditionalGeneration,
            FlaxBlenderbotModel,
            FlaxBlenderbotPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy loader at runtime.
    lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 562
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for the Swin model (torch and tf symbol lists guarded
# by availability checks).
# NOTE(review): mangling broke the standard pattern — the backend symbol lists
# rebind ``lowercase_`` instead of adding keys to the structure dict
# (upstream: ``_import_structure["modeling_swin"] = [...]``), so the
# ``_import_structure`` name read at the bottom is undefined (NameError at
# import).  Needs reconstruction against the upstream template.
lowercase_ = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'SwinForImageClassification',
        'SwinForMaskedImageModeling',
        'SwinModel',
        'SwinPreTrainedModel',
        'SwinBackbone',
    ]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    lowercase_ = [
        'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFSwinForImageClassification',
        'TFSwinForMaskedImageModeling',
        'TFSwinModel',
        'TFSwinPreTrainedModel',
    ]
# Static-analysis path: perform the real imports for type checkers only.
if TYPE_CHECKING:
    from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swin import (
            SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinBackbone,
            SwinForImageClassification,
            SwinForMaskedImageModeling,
            SwinModel,
            SwinPreTrainedModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_swin import (
            TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSwinForImageClassification,
            TFSwinForMaskedImageModeling,
            TFSwinModel,
            TFSwinPreTrainedModel,
        )
else:
    import sys
    # Replace this module with a lazy loader at runtime.
    lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 562
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Score-SDE (variance-exploding) unconditional generation pipeline:
    predictor-corrector sampling with a UNet and a ScoreSdeVeScheduler.

    NOTE(review): identifier mangling has broken this class — ``__call__``
    declares the parameter name ``UpperCamelCase__`` repeatedly (a SyntaxError
    in Python), both class attributes are annotated as the mangled
    ``_lowerCAmelCase = 42`` (upstream: ``unet: UNet2DModel`` and
    ``scheduler: ScoreSdeVeScheduler``), and the body reads names like
    ``shape``/``model``/``sample`` that the mangled locals never bind
    consistently.  Needs reconstruction.
    """
    _lowerCAmelCase = 42
    _lowerCAmelCase = 42
    def __init__( self , UpperCamelCase__ , UpperCamelCase__ ):
        # Register the UNet and scheduler as pipeline modules.
        super().__init__()
        self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
    @torch.no_grad()
    def __call__( self , UpperCamelCase__ = 1 , UpperCamelCase__ = 2000 , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , **UpperCamelCase__ , ):
        """Draw random latents and run the predictor-corrector SDE sampler."""
        A__ : Tuple = self.unet.config.sample_size
        A__ : List[Any] = (batch_size, 3, img_size, img_size)
        A__ : Tuple = self.unet
        # Initial noise scaled by the scheduler's starting sigma.
        A__ : Union[str, Any] = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ ) * self.scheduler.init_noise_sigma
        A__ : Tuple = sample.to(self.device )
        self.scheduler.set_timesteps(UpperCamelCase__ )
        self.scheduler.set_sigmas(UpperCamelCase__ )
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
            A__ : Dict = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
            # correction step
            for _ in range(self.scheduler.config.correct_steps ):
                A__ : Optional[Any] = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
                A__ : Any = self.scheduler.step_correct(UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
            # prediction step
            A__ : Any = model(UpperCamelCase__ , UpperCamelCase__ ).sample
            A__ : Dict = self.scheduler.step_pred(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ )
            A__ , A__ : str = output.prev_sample, output.prev_sample_mean
        # Final mean sample, clamped to [0, 1] and laid out as NHWC numpy.
        A__ : List[str] = sample_mean.clamp(0 , 1 )
        A__ : Optional[int] = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            A__ : Optional[int] = self.numpy_to_pil(UpperCamelCase__ )
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=UpperCamelCase__ )
| 55
|
from numpy import exp, pi, sqrt
def SCREAMING_SNAKE_CASE ( x , mu : float = 0.0 , sigma : float = 1.0 ):
    """Evaluate the normal (Gaussian) probability density at ``x`` for mean
    ``mu`` and standard deviation ``sigma``.

    BUG FIX: the original declared the same parameter name three times in the
    signature (a SyntaxError in Python) while the body read ``x``/``mu``/
    ``sigma``; the intended names are restored from the body.
    """
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 55
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class _UpperCAmelCase :
    """Flax scheduler state for the Karras-Ve sampler.

    NOTE(review): all three fields are annotated under the same mangled name
    ``snake_case`` (only the last annotation survives) — upstream these are
    ``num_inference_steps``, ``schedule``, and ``timesteps``.
    """
    snake_case = None
    snake_case = None
    snake_case = None  # sigma(t_i)
    @classmethod
    def lowerCAmelCase ( cls : Optional[int] ):
        """Alternate constructor returning an empty state (mangled ``create``)."""
        return cls()
@dataclass
class _UpperCAmelCase ( lowerCAmelCase__ ):
    """Scheduler step output (mangled; upstream fields are ``prev_sample``,
    ``derivative``, ``state``).  NOTE(review): this class reuses the name of
    the state dataclass above, shadowing it at module level."""
    snake_case = 42
    snake_case = 42
    snake_case = 42
class _UpperCAmelCase ( lowerCAmelCase__ , lowerCAmelCase__ ):
    """Flax implementation of the Karras et al. (2022) variance-expanding
    stochastic sampler ("Karras-Ve").

    NOTE(review): identifier mangling has broken this class — the two base
    classes are the same mangled name, and method bodies read
    ``lowerCAmelCase_`` (never defined) where the ``__UpperCAmelCase``
    parameters were intended; calling any method raises NameError.  Annotated
    as-is; needs reconstruction.
    """
    @property
    def lowerCAmelCase ( self : Union[str, Any] ):
        # Scheduler state is immutable/functional (flax convention).
        return True
    @register_to_config
    def __init__( self : List[Any] , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : float = 100 , __UpperCAmelCase : float = 1.007 , __UpperCAmelCase : float = 80 , __UpperCAmelCase : float = 0.05 , __UpperCAmelCase : float = 50 , ):
        # Config values (sigma_min/max, s_noise, s_churn, s_min, s_max) are
        # captured by @register_to_config; nothing else to initialise.
        pass
    def lowerCAmelCase ( self : List[str] ):
        """Return a fresh (empty) scheduler state."""
        return KarrasVeSchedulerState.create()
    def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : KarrasVeSchedulerState , __UpperCAmelCase : int , __UpperCAmelCase : Tuple = () ):
        """Precompute the reversed timestep array and the sigma schedule."""
        _A = jnp.arange(0 , lowerCAmelCase_ )[::-1].copy()
        # Geometric interpolation between sigma_max and sigma_min.
        _A = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=lowerCAmelCase_ , schedule=jnp.array(lowerCAmelCase_ , dtype=jnp.floataa ) , timesteps=lowerCAmelCase_ , )
    def lowerCAmelCase ( self : str , __UpperCAmelCase : KarrasVeSchedulerState , __UpperCAmelCase : jnp.ndarray , __UpperCAmelCase : float , __UpperCAmelCase : random.KeyArray , ):
        """Stochastically increase sigma ("churn") and add matched noise."""
        if self.config.s_min <= sigma <= self.config.s_max:
            _A = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            _A = 0
        # sample eps ~ N(0, S_noise^2 * I)
        _A = random.split(lowerCAmelCase_ , num=1 )
        _A = self.config.s_noise * random.normal(key=lowerCAmelCase_ , shape=sample.shape )
        _A = sigma + gamma * sigma
        _A = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def lowerCAmelCase ( self : str , __UpperCAmelCase : KarrasVeSchedulerState , __UpperCAmelCase : jnp.ndarray , __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : jnp.ndarray , __UpperCAmelCase : bool = True , ):
        """Euler step: propagate the sample from sigma_hat to sigma_prev."""
        _A = sample_hat + sigma_hat * model_output
        _A = (sample_hat - pred_original_sample) / sigma_hat
        _A = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
    def lowerCAmelCase ( self : Dict , __UpperCAmelCase : KarrasVeSchedulerState , __UpperCAmelCase : jnp.ndarray , __UpperCAmelCase : float , __UpperCAmelCase : float , __UpperCAmelCase : jnp.ndarray , __UpperCAmelCase : jnp.ndarray , __UpperCAmelCase : jnp.ndarray , __UpperCAmelCase : bool = True , ):
        """Second-order (Heun) correction averaging the two derivatives."""
        _A = sample_prev + sigma_prev * model_output
        _A = (sample_prev - pred_original_sample) / sigma_prev
        _A = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=lowerCAmelCase_ , derivative=lowerCAmelCase_ , state=lowerCAmelCase_ )
    def lowerCAmelCase ( self : int , __UpperCAmelCase : KarrasVeSchedulerState , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Union[str, Any] ):
        """Adding noise is not supported by this scheduler."""
        raise NotImplementedError()
| 330
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
A_ = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
A_ = logging.WARNING
def UpperCAmelCase ( )-> int:
    """Resolve the library's default logging level.

    Honors the ``DATASETS_VERBOSITY`` environment variable when it names one
    of the keys of ``log_levels``; otherwise falls back to
    ``_default_log_level``.

    Fixes: the original passed the enclosing function object as the
    ``os.getenv`` default, and read an ``env_level_str`` local that was never
    bound (the obfuscated assignment used another name).

    NOTE(review): ``log_levels`` / ``_default_log_level`` were renamed ``A_``
    by the obfuscation at module scope — restore them too.
    """
    env_level_str = os.getenv('DATASETS_VERBOSITY' )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        logging.getLogger().warning(
            f'Unknown option DATASETS_VERBOSITY={env_level_str}, '
            f'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level
def UpperCAmelCase ( )-> str:
    """Return the top-level package name of this module."""
    root_package, _, _ = __name__.partition('.' )
    return root_package
def UpperCAmelCase ( )-> logging.Logger:
    """Return the library's root logger.

    NOTE(review): ``_get_library_name`` is not bound under that name in this
    obfuscated module (every helper was renamed ``UpperCAmelCase``), so this
    raises ``NameError`` as written — restore the helper names.
    """
    return logging.getLogger(_get_library_name() )
def UpperCAmelCase ( )-> None:
    """Set the library root logger to the configured default level.

    NOTE(review): the result is bound to a throwaway local while the next
    line reads ``library_root_logger`` — the obfuscation collapsed the
    variable; also the two helpers are not defined under these names here.
    """
    SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def UpperCAmelCase ( )-> None:
    """Reset the library root logger level to ``NOTSET``.

    NOTE(review): same variable-collapse issue as the configure helper above —
    ``library_root_logger`` is read but only a throwaway local was bound.
    """
    SCREAMING_SNAKE_CASE_ = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def UpperCAmelCase ( UpperCAmelCase = None )-> logging.Logger:
    """Return a logger with the given name, defaulting to the library root.

    Fixes: the original tested an undefined ``name`` local (the parameter is
    called ``UpperCAmelCase``), and discarded the computed library name before
    calling ``logging.getLogger(None)``.

    Args:
        UpperCAmelCase: optional dotted logger name; when ``None`` the
            module's top-level package name is used.
    """
    if UpperCAmelCase is None:
        # Inlined library-name lookup (the helper's name was collapsed by
        # obfuscation and is unreachable).
        UpperCAmelCase = __name__.split('.' )[0]
    return logging.getLogger(UpperCAmelCase )
def UpperCAmelCase ( )-> int:
    """Return the effective log level of the library root logger.

    NOTE(review): ``_get_library_root_logger`` is not defined under that name
    in this obfuscated module — verify the helper binding.
    """
    return _get_library_root_logger().getEffectiveLevel()
def UpperCAmelCase ( UpperCAmelCase )-> None:
    """Set the library root logger level to ``UpperCAmelCase``.

    NOTE(review): ``_get_library_root_logger`` is not bound under that name
    here — verify the helper binding.
    """
    _get_library_root_logger().setLevel(UpperCAmelCase )
def UpperCAmelCase ( )-> Optional[Any]:
    """Set library verbosity to ``INFO``.

    Fixes: the original passed the enclosing function object to an undefined
    ``set_verbosity``; inlined the root-logger level set (level from the
    upstream ``datasets`` source order — confirm).
    """
    return logging.getLogger(__name__.split('.' )[0] ).setLevel(INFO )
def UpperCAmelCase ( )-> Union[str, Any]:
    """Set library verbosity to ``WARNING``.

    Fixes: the original passed the enclosing function object to an undefined
    ``set_verbosity``; inlined the root-logger level set (level from the
    upstream ``datasets`` source order — confirm).
    """
    return logging.getLogger(__name__.split('.' )[0] ).setLevel(WARNING )
def UpperCAmelCase ( )-> Dict:
    """Set library verbosity to ``DEBUG``.

    Fixes: the original passed the enclosing function object to an undefined
    ``set_verbosity``; inlined the root-logger level set (level from the
    upstream ``datasets`` source order — confirm).
    """
    return logging.getLogger(__name__.split('.' )[0] ).setLevel(DEBUG )
def UpperCAmelCase ( )-> Optional[int]:
    """Set library verbosity to ``ERROR``.

    Fixes: the original passed the enclosing function object to an undefined
    ``set_verbosity``; inlined the root-logger level set (level from the
    upstream ``datasets`` source order — confirm).
    """
    return logging.getLogger(__name__.split('.' )[0] ).setLevel(ERROR )
def UpperCAmelCase ( )-> None:
    """Presumably disables a module-level flag (logger propagation or tqdm).

    NOTE(review): the assignment binds a throwaway local, making this a
    no-op — the original target name was lost to obfuscation; verify.
    """
    SCREAMING_SNAKE_CASE_ = False
def UpperCAmelCase ( )-> None:
    """Presumably enables a module-level flag (logger propagation or tqdm).

    NOTE(review): the assignment binds a throwaway local, making this a
    no-op — the original target name was lost to obfuscation; verify.
    """
    SCREAMING_SNAKE_CASE_ = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class snake_case :
    """No-op stand-in for a tqdm progress bar.

    Iterates the wrapped iterable (if any) and silently absorbs every other
    attribute access and method call.

    Fixes a SyntaxError: ``__init__`` and the inner no-op both declared
    ``*args`` and ``**kwargs`` under one shared name.
    """

    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        # Keep only the iterable (first positional argument); drop the rest.
        self._iterator = args[0] if args else None

    def __iter__( self ):
        return iter(self._iterator )

    def __getattr__( self , attr ):
        """Any tqdm method (update, close, ...) becomes a harmless no-op."""
        def empty_fn( *args , **kwargs ):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__( self ):
        return self

    def __exit__( self , type_ , value , traceback ):
        return
A_ = True
class snake_case :
    """Factory: calling it returns a live ``tqdm`` bar when progress bars are
    globally enabled, otherwise an inert dummy bar.

    Fixes SyntaxErrors: ``__call__`` declared ``*args``, a keyword and
    ``**kwargs`` all under one name (keyword restored to ``disable``, which
    the body reads), and ``set_lock`` shared one name for its varargs.
    """

    def __call__( self , *args , disable=False , **kwargs ):
        """Build a progress bar; ``disable=True`` forces the inert dummy."""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            # NOTE(review): ``EmptyTqdm`` is the dummy class above, renamed
            # ``snake_case`` by obfuscation — restore the class name.
            return EmptyTqdm(*args , **kwargs )

    def _lowercase ( self , *args , **kwargs ):
        """Forward ``tqdm.set_lock`` when progress bars are active."""
        # NOTE(review): originally presumably ``self._lock = None`` — the
        # obfuscation bound a throwaway local; verify.
        SCREAMING_SNAKE_CASE_ = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )

    def _lowercase ( self ):
        """Return ``tqdm``'s shared lock when progress bars are active."""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
A_ = _tqdm_cls()
def UpperCAmelCase ( )-> bool:
    """Return whether tqdm progress bars are globally enabled.

    NOTE(review): ``_tqdm_active`` is never assigned under that name in this
    obfuscated module (the flag was renamed ``A_``) — reading it raises
    ``NameError``; restore the flag name.
    """
    global _tqdm_active
    return bool(_tqdm_active )
def UpperCAmelCase ( )-> List[str]:
    """Enable tqdm progress bars globally.

    NOTE(review): the assignment binds a throwaway local instead of the
    declared global, making this a no-op — restore ``_tqdm_active = True``.
    """
    global _tqdm_active
    SCREAMING_SNAKE_CASE_ = True
def UpperCAmelCase ( )-> Optional[int]:
    """Disable tqdm progress bars globally.

    NOTE(review): the assignment binds a throwaway local instead of the
    declared global, making this a no-op — restore ``_tqdm_active = False``.
    """
    global _tqdm_active
    SCREAMING_SNAKE_CASE_ = False
| 393
| 0
|
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_A = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( default=None , metadata=None ) -> Any:
    """Shorthand for a dataclass list field with a per-instance default.

    Fixes a SyntaxError: both parameters were named ``__UpperCAmelCase``.
    The body already read ``default``, and the second argument feeds
    ``metadata`` — names restored accordingly.

    Args:
        default: value copied into each new instance via ``default_factory``.
        metadata: dataclass field metadata (e.g. ``{"help": ...}``).
    """
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class lowerCamelCase :
    """Benchmark configuration arguments (deprecated HF benchmarking utils).

    NOTE(review): every field below was renamed to ``a`` by the obfuscation,
    so only the last assignment survives at class-construction time; the
    original distinct field names (models, batch_sizes, sequence_lengths,
    inference, cuda, tpu, fp16, training, ...) must be restored before use.
    ``__a`` defaults and the ``A__`` warning category are likewise collapsed
    names — verify against upstream.
    """

    a = list_field(
        default=[] , metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        } , )
    a = list_field(
        default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
    a = list_field(
        default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
    a = field(
        default=__a , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
    a = field(
        default=__a , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
    a = field(
        default=__a , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
    a = field(default=__a , metadata={"help": "Use FP16 to accelerate inference."} )
    a = field(default=__a , metadata={"help": "Benchmark training of model"} )
    a = field(default=__a , metadata={"help": "Verbose memory tracing"} )
    a = field(
        default=__a , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
    a = field(
        default=__a , metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        } , )
    a = field(default=__a , metadata={"help": "Trace memory line by line"} )
    a = field(default=__a , metadata={"help": "Save result to a CSV file"} )
    a = field(default=__a , metadata={"help": "Save all print statements in a log file"} )
    a = field(default=__a , metadata={"help": "Whether to print environment information"} )
    a = field(
        default=__a , metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        } , )
    a = field(
        default=f"""inference_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv."} , )
    a = field(
        default=f"""inference_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv."} , )
    a = field(
        default=f"""train_time_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
    a = field(
        default=f"""train_memory_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
    a = field(
        default=f"""env_info_{round(time() )}.csv""" , metadata={"help": "CSV filename used if saving environment information."} , )
    a = field(
        default=f"""log_{round(time() )}.csv""" , metadata={"help": "Log filename used if print statements are saved in log."} , )
    a = field(default=3 , metadata={"help": "Times an experiment will be run."} )
    a = field(
        default=__a , metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        } , )

    def lowerCAmelCase_ ( self : Optional[Any] ) -> Any:
        """Emit the deprecation warning for this benchmarking class."""
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models." , A__ , )

    def lowerCAmelCase_ ( self : int ) -> Tuple:
        """Serialise this configuration to a pretty-printed JSON string."""
        return json.dumps(dataclasses.asdict(self ) , indent=2 )

    @property
    def lowerCAmelCase_ ( self : Dict ) -> List[str]:
        """Return the configured model names, requiring at least one."""
        if len(self.models ) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = [\'bert-base-cased\']." )
        return self.models

    @property
    def lowerCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
        """Whether multiprocess measurement is usable (never on TPU)."""
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU." )
            return False
        else:
            return True
| 700
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> List[str]:
if not head:
return True
# split the list to two parts
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = head.next, head
while fast and fast.next:
SCREAMING_SNAKE_CASE__ = fast.next.next
SCREAMING_SNAKE_CASE__ = slow.next
SCREAMING_SNAKE_CASE__ = slow.next
SCREAMING_SNAKE_CASE__ = None # Don't forget here! But forget still works!
# reverse the second part
SCREAMING_SNAKE_CASE__ = None
while second:
SCREAMING_SNAKE_CASE__ = second.next
SCREAMING_SNAKE_CASE__ = node
SCREAMING_SNAKE_CASE__ = second
SCREAMING_SNAKE_CASE__ = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
SCREAMING_SNAKE_CASE__ = node.next
SCREAMING_SNAKE_CASE__ = head.next
return True
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> bool:
    """Check whether a singly linked list is a palindrome using a stack.

    Pushes the second half onto a stack, then pops while walking from the
    head. Fixes the obfuscation's collapsed locals (``slow``/``fast``/``cur``
    all shared one name) and corrects the return annotation to ``bool``.

    Args:
        __UpperCAmelCase: head node (with ``val`` / ``next``) or ``None``.
    """
    if not __UpperCAmelCase or not __UpperCAmelCase.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = __UpperCAmelCase
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True
def SCREAMING_SNAKE_CASE ( __UpperCAmelCase ) -> bool:
    """Check whether a singly linked list is a palindrome via position lists.

    Records each value's positions; in a palindrome, mirrored positions of
    equal values sum to ``length - 1`` and at most one value occurs an odd
    number of times.

    Fixes: the obfuscated body appended the *node* instead of its position
    and read several locals that were never bound; restored per the
    algorithm's own ``checksum``/``middle`` logic below.

    Args:
        __UpperCAmelCase: head node (with ``val`` / ``next``) or ``None``.
    """
    if not __UpperCAmelCase or not __UpperCAmelCase.next:
        return True
    d = {}
    pos = 0
    node = __UpperCAmelCase
    while node:
        if node.val in d:
            d[node.val].append(pos )
        else:
            d[node.val] = [pos]
        node = node.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 538
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 21
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __SCREAMING_SNAKE_CASE ( yaml.SafeLoader ):
def __magic_name__ ( self : Any , __lowercase : str ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : str =[self.constructed_objects[key_node] for key_node, _ in node.value]
SCREAMING_SNAKE_CASE__ : Optional[int] =[tuple(__lowercase ) if isinstance(__lowercase , __lowercase ) else key for key in keys]
SCREAMING_SNAKE_CASE__ : Optional[int] =Counter(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =[key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"Got duplicate yaml keys: {duplicate_keys}" )
def __magic_name__ ( self : Dict , __lowercase : Any , __lowercase : str=False ) -> int:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =super().construct_mapping(__lowercase , deep=__lowercase )
self._check_no_duplicates_on_constructed_node(__lowercase )
return mapping
def _a( UpperCamelCase__ : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int =list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
SCREAMING_SNAKE_CASE__ : List[str] =full_content[1:].index('''---''' ) + 1
SCREAMING_SNAKE_CASE__ : str ='''\n'''.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
    """Dict-like holder for a dataset README's YAML metadata block.

    NOTE(review): several tuple unpackings below bind both results to the
    same obfuscated name (``SCREAMING_SNAKE_CASE__``), and the bodies then
    read the original variable names (``yaml_string``, ``path``, ``content``,
    ``metadata_dict``...) which are no longer bound — restore those locals.
    """

    # class attributes
    snake_case_ = {"""train_eval_index"""}  # train-eval-index in the YAML metadata

    @classmethod
    def __magic_name__ ( cls : Optional[int] , __lowercase : Path ) -> "DatasetMetadata":
        # Load metadata from a README file on disk; empty instance when the
        # file has no YAML front matter.
        with open(__lowercase , encoding='''utf-8''' ) as readme_file:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =_split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(__lowercase )
        else:
            return cls()

    def __magic_name__ ( self : int , __lowercase : Path ) -> int:
        # Rewrite the README at the given path with this metadata prepended.
        if path.exists():
            with open(__lowercase , encoding='''utf-8''' ) as readme_file:
                SCREAMING_SNAKE_CASE__ : Optional[int] =readme_file.read()
        else:
            SCREAMING_SNAKE_CASE__ : List[str] =None
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =self._to_readme(__lowercase )
        with open(__lowercase , '''w''' , encoding='''utf-8''' ) as readme_file:
            readme_file.write(__lowercase )

    def __magic_name__ ( self : int , __lowercase : Optional[str] = None ) -> str:
        # Prepend a `---`-delimited YAML block to the (optional) README body.
        if readme_content is not None:
            SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] =_split_yaml_from_readme(__lowercase )
            SCREAMING_SNAKE_CASE__ : Optional[int] ='''---\n''' + self.to_yaml_string() + '''---\n''' + content
        else:
            SCREAMING_SNAKE_CASE__ : Optional[Any] ='''---\n''' + self.to_yaml_string() + '''---\n'''
        return full_content

    @classmethod
    def __magic_name__ ( cls : Union[str, Any] , __lowercase : str ) -> "DatasetMetadata":
        # Parse a YAML string, converting dashed keys to field names.
        SCREAMING_SNAKE_CASE__ : Tuple =yaml.load(__lowercase , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        SCREAMING_SNAKE_CASE__ : Optional[Any] ={
            (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**__lowercase )

    def __magic_name__ ( self : Optional[int] ) -> str:
        # Serialise back to YAML, re-dashing the special keys.
        return yaml.safe_dump(
            {
                (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=__lowercase , allow_unicode=__lowercase , encoding='''utf-8''' , ).decode('''utf-8''' )
a_ = {
'image-classification': [],
'translation': [],
'image-segmentation': [],
'fill-mask': [],
'automatic-speech-recognition': [],
'token-classification': [],
'sentence-similarity': [],
'audio-classification': [],
'question-answering': [],
'summarization': [],
'zero-shot-classification': [],
'table-to-text': [],
'feature-extraction': [],
'other': [],
'multiple-choice': [],
'text-classification': [],
'text-to-image': [],
'text2text-generation': [],
'zero-shot-image-classification': [],
'tabular-classification': [],
'tabular-regression': [],
'image-to-image': [],
'tabular-to-text': [],
'unconditional-image-generation': [],
'text-retrieval': [],
'text-to-speech': [],
'object-detection': [],
'audio-to-audio': [],
'text-generation': [],
'conversational': [],
'table-question-answering': [],
'visual-question-answering': [],
'image-to-text': [],
'reinforcement-learning': [],
'voice-activity-detection': [],
'time-series-forecasting': [],
'document-question-answering': [],
}
# Command-line entry point: validate and round-trip a README's YAML metadata.
# NOTE(review): every assignment below binds ``a_`` while the following lines
# read the original names (``ap``, ``args``, ``readme_filepath``,
# ``dataset_metadata``) — restore the distinct variable names before running.
if __name__ == "__main__":
    from argparse import ArgumentParser

    a_ = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    a_ = ap.parse_args()
    a_ = Path(args.readme_filepath)
    a_ = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 296
| 0
|
from math import sqrt
def lowerCamelCase_ ( lowerCAmelCase: int )-> int:
    """Return the sum of the proper divisors of ``lowerCAmelCase``.

    Iterates divisors up to sqrt(n), adding each divisor pair once and the
    exact square root only once, then subtracts ``n`` itself.

    Fixes: the obfuscated body read ``n``/``total``/``snake_case_`` locals
    that were never bound (NameError), and the old ``List[Any]`` return
    annotation referenced an unimported name.
    """
    n = lowerCAmelCase
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            # Perfect-square root: count it only once.
            total += i
    return total - n
def lowerCamelCase_ ( lowerCAmelCase: int = 1_00_00 )-> int:
    """Project Euler 21: sum all amicable numbers below ``lowerCAmelCase``.

    Fixes: the original called an undefined ``sum_of_divisors`` (its sibling
    was renamed by obfuscation) and omitted the amicable condition
    ``d(i) != i`` present in the canonical solution; the divisor sum is
    inlined as a private helper so this block stands alone. The unimported
    ``Union``/``Any`` annotations are replaced with ``int``.
    """
    def _sum_of_divisors(n: int) -> int:
        # Proper-divisor sum via trial division up to sqrt(n).
        total = 0
        for i in range(1 , int(sqrt(n ) + 1 ) ):
            if n % i == 0 and i != sqrt(n ):
                total += i + n // i
            elif i == sqrt(n ):
                total += i
        return total - n

    return sum(
        i
        for i in range(1 , lowerCAmelCase )
        if _sum_of_divisors(_sum_of_divisors(i ) ) == i and _sum_of_divisors(i ) != i )


if __name__ == "__main__":
    print(lowerCamelCase_(int(str(input()).strip())))
| 710
|
def lowerCamelCase_ ( lowerCAmelCase: int )-> list:
    """Return the first ``lowerCAmelCase`` Hamming numbers (2^i * 3^j * 5^k).

    Classic three-pointer construction: each new element is the smallest
    unseen multiple of 2, 3 or 5 of an earlier element.

    Fixes: the obfuscation collapsed ``i``/``j``/``k``/``index`` into one
    throwaway name, so the loop read unbound locals (NameError); the error
    was also built but raised via an intermediate — now raised directly.

    Raises:
        ValueError: if ``lowerCAmelCase`` < 1.
    """
    n_element = int(lowerCAmelCase )
    if n_element < 1:
        raise ValueError('a should be a positive number' )
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each pointer past factors that would duplicate the tail.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list


if __name__ == "__main__":
    lowerCAmelCase_ = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    lowerCAmelCase_ = lowerCamelCase_(int(lowerCAmelCase_))
    print("""-----------------------------------------------------""")
    print(F"""The list with nth numbers is: {lowerCAmelCase_}""")
    print("""-----------------------------------------------------""")
| 669
| 0
|
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def __a ( lowerCAmelCase_ : str = "" ) -> Optional[int]:
    '''Scrape IMDb's Top-250 chart into a ``{title: rating}`` dict.

    NOTE(review): the body reads ``url`` but the parameter is named
    ``lowerCAmelCase_``, and the result binds a throwaway local while the
    next lines read ``soup``/``titles``/``ratings`` — the obfuscation
    collapsed the locals; also the module imports BeautifulSoup from ``bsa``
    (typo for ``bs4``). Restore before running.
    '''
    UpperCAmelCase_= url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
    UpperCAmelCase_= BeautifulSoup(requests.get(__snake_case ).text ,"""html.parser""" )
    UpperCAmelCase_= soup.find_all("""td""" ,attrs="""titleColumn""" )
    UpperCAmelCase_= soup.find_all("""td""" ,class_="""ratingColumn imdbRating""" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(__snake_case ,__snake_case )
    }
def __a ( lowerCAmelCase_ : str = "IMDb_Top_250_Movies.csv" ) -> Any:
    '''Write the scraped Top-250 ratings to a CSV file.

    NOTE(review): ``get_imdb_top_aaa_movies`` is not defined under that name
    here (the scraper above was renamed ``__a`` and is itself shadowed by
    this function), and ``__snake_case``/``writer``/``movies`` read collapsed
    locals — restore the original names before running.
    '''
    UpperCAmelCase_= get_imdb_top_aaa_movies()
    with open(__snake_case ,"""w""" ,newline="""""" ) as out_file:
        UpperCAmelCase_= csv.writer(__snake_case )
        writer.writerow(["""Movie title""", """IMDb rating"""] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )


# NOTE(review): ``write_movies`` is not defined under that name (see above).
if __name__ == "__main__":
    write_movies()
| 593
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = R'\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n or scores for each vocabulary token after SoftMax.\n kwargs (`Dict[str, Any]`, *optional*):\n Additional stopping criteria specific kwargs.\n\n Return:\n `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'
class lowerCamelCase (_snake_case ):
    '''Abstract base class for generation stopping criteria.

    Fixes a SyntaxError: ``__call__`` declared ``input_ids``, ``scores`` and
    the kwargs under one shared name. The broken
    ``@add_start_docstrings(_UpperCamelCase)`` decorator (undefined name at
    class-creation time) is removed — restore it with the real docstring
    constant when de-obfuscating.
    '''

    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        """Return ``True`` when generation should stop."""
        raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class lowerCamelCase (_snake_case ):
    '''Stops generation once the sequence reaches ``max_length`` tokens.

    Fixes SyntaxErrors (duplicate parameter names in ``__init__`` and
    ``__call__``; names restored from the body's own reads) and binds the
    constructor arguments to ``self`` instead of throwaway locals. The broken
    ``@add_start_docstrings(_UpperCamelCase)`` decorator is removed.
    '''

    def __init__( self , max_length , max_position_embeddings = None ) -> int:
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        # Warn (once) when generation will exceed the model's position limit.
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                'exceptions, performance degradation, or nothing at all.' )
        return is_done
class lowerCamelCase (_snake_case ):
    '''Deprecated: stops after ``max_new_tokens`` tokens past ``start_length``.

    Fixes SyntaxErrors (duplicate parameter names; restored from the body's
    own reads), binds attributes to ``self`` instead of throwaway locals, and
    passes ``FutureWarning`` to ``warnings.warn`` where the original passed
    an undefined name. The broken ``@add_start_docstrings`` decorator is
    removed.
    '''

    def __init__( self , start_length , max_new_tokens ) -> Optional[Any]:
        warnings.warn(
            'The class `MaxNewTokensCriteria` is deprecated. '
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            'with `max_length = start_length + max_new_tokens` instead.' , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        return input_ids.shape[-1] >= self.max_length
class lowerCamelCase (_snake_case ):
    '''Stops generation after ``max_time`` seconds of wall-clock time.

    Fixes SyntaxErrors (duplicate parameter names) and binds the constructor
    arguments to ``self`` instead of throwaway locals. The broken
    ``@add_start_docstrings`` decorator is removed.
    '''

    def __init__( self , max_time , initial_timestamp = None ) -> Optional[Any]:
        self.max_time = max_time
        # Default the clock start to "now" unless the caller pins it.
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time
class lowerCamelCase (_snake_case ):
    '''List of stopping criteria; generation stops when any criterion fires.

    Fixes a SyntaxError in ``__call__`` (duplicate parameter names). The
    broken ``@add_start_docstrings`` decorator is removed.
    '''

    def __call__( self , input_ids , scores , **kwargs ) -> bool:
        return any(criteria(input_ids , scores ) for criteria in self )

    @property
    def __UpperCAmelCase ( self ) -> Optional[int]:
        """Return the ``max_length`` of the first length-based criterion.

        NOTE(review): the original ``isinstance`` checks targeted the
        MaxLengthCriteria / MaxNewTokensCriteria classes, whose names were
        collapsed by obfuscation; a duck-typed attribute check is used here —
        restore the isinstance checks when de-obfuscating.
        """
        for stopping_criterium in self:
            if hasattr(stopping_criterium , 'max_length' ):
                return stopping_criterium.max_length
        return None
def lowercase__ ( stopping_criteria , max_length ):
    '''Reconcile a stopping-criteria list with an explicit ``max_length``.

    Returns a deep copy; warns when the list carries a different
    ``max_length``, and appends a length criterion when it carries none.

    Fixes a SyntaxError (both parameters were named ``__snake_case``) and
    passes ``UserWarning`` where the original passed an undefined name.

    NOTE(review): ``MaxLengthCriteria`` is name-collapsed in this module
    (every class is ``lowerCamelCase``) — restore the class name.
    '''
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter' , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
| 406
| 0
|
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
SCREAMING_SNAKE_CASE__ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
SCREAMING_SNAKE_CASE__ = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'''🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results''',
"emoji": True,
},
}
]
SCREAMING_SNAKE_CASE__ = 0
# Scan every pytest JSON-report log in the CWD, tallying failures per file.
# NOTE(review): the obfuscation collapsed all locals to one name below —
# ``section_num_failed``, ``failed``, ``test`` and ``duration`` are read but
# never bound under those names; restore the distinct variables.
for log in Path().glob("*.log"):
    SCREAMING_SNAKE_CASE__ = 0
    with open(log, "r") as f:
        for line in f:
            SCREAMING_SNAKE_CASE__ = json.loads(line)
            if line.get("nodeid", "") != "":
                SCREAMING_SNAKE_CASE__ = line["nodeid"]
                if line.get("duration", None) is not None:
                    SCREAMING_SNAKE_CASE__ = f'''{line['duration']:.4f}'''
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    SCREAMING_SNAKE_CASE__ = []
    log.unlink()
SCREAMING_SNAKE_CASE__ = ""
SCREAMING_SNAKE_CASE__ = []
# Build the Slack message body (per-file failure tables) from group_info.
# NOTE(review): as above, the collapsed assignments leave ``message``,
# ``filesafailed``, ``failed_table``, ``files``, ``individual_files``,
# ``table``, ``err`` and ``offset`` unbound under the names that are read.
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            SCREAMING_SNAKE_CASE__ = []
            SCREAMING_SNAKE_CASE__ = {}
            for test in failed_tests:
                SCREAMING_SNAKE_CASE__ = test[0].split("::")
                SCREAMING_SNAKE_CASE__ = data[0].split("/")[-1]
                if data[0] not in filesafailed:
                    SCREAMING_SNAKE_CASE__ = [data[1:]]
                else:
                    filesafailed[data[0]] += [data[1:]]
                failed_table.append(data)
            SCREAMING_SNAKE_CASE__ = [test[0] for test in failed_table]
            SCREAMING_SNAKE_CASE__ = list(set(files))
            # Count number of instances in failed_tests
            SCREAMING_SNAKE_CASE__ = []
            for file in individual_files:
                table.append([file, len(filesafailed[file])])
            SCREAMING_SNAKE_CASE__ = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_filesafailed.append(filesafailed)
    # Slack caps message blocks around 3000 chars — truncate with a notice.
    if len(message) > 3000:
        SCREAMING_SNAKE_CASE__ = "Too many failed tests, please see the full report in the Action results."
        SCREAMING_SNAKE_CASE__ = len(err) + 10
        SCREAMING_SNAKE_CASE__ = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
        print(f'''### {message}''')
else:
    SCREAMING_SNAKE_CASE__ = "No failed tests! 🤗"
    print(f'''## {message}''')
    payload.append(no_error_payload)
# Post the assembled report to Slack, but only in CI (TEST_TYPE set).
# NOTE(review): ``client``, ``response``, ``ts``, ``md_report``,
# ``action_button``, ``date_report``, ``test_class`` and ``payload`` are read
# below but only the collapsed name is ever assigned — restore the locals;
# also ``blocks=[payload]`` in the threaded post looks like it should be the
# per-test block dict, not the whole payload list — verify.
if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    SCREAMING_SNAKE_CASE__ = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        SCREAMING_SNAKE_CASE__ = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        SCREAMING_SNAKE_CASE__ = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f'''https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
            },
        }
        payload.append(action_button)
        SCREAMING_SNAKE_CASE__ = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f'''Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}''',
                }
            ],
        }
        payload.append(date_report)
        SCREAMING_SNAKE_CASE__ = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        SCREAMING_SNAKE_CASE__ = response.data["ts"]
        for failed_file in all_filesafailed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                SCREAMING_SNAKE_CASE__ = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        SCREAMING_SNAKE_CASE__ = row[0]
                    else:
                        SCREAMING_SNAKE_CASE__ = ""
                SCREAMING_SNAKE_CASE__ = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```''',
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
| 708
|
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
SCREAMING_SNAKE_CASE__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowerCamelCase ( _snake_case : str ):
    """Resolve a feature-extractor class from its class name.

    Looks the name up in ``FEATURE_EXTRACTOR_MAPPING_NAMES`` (importing the
    owning model module on demand), then among dynamically registered
    extractors, and finally falls back to the main ``transformers`` module,
    which exposes dummy objects for classes whose dependencies are missing so
    the user gets a helpful error. Returns ``None`` when nothing matches.

    FIX: the original body referenced ``class_name`` (and the imported
    module) that were never bound — the parameter is ``_snake_case`` — so
    every call raised NameError. The parameter name is kept for
    backward compatibility and aliased locally.
    """
    class_name = _snake_case  # keep the original parameter name for callers
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            # The module to import derives from the *model type* (mapping
            # key), not from the class name being resolved.
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f'''.{module_name}''', "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        # Extractors registered at runtime are matched by their __name__.
        if getattr(extractor, "__name__", None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def lowerCamelCase (
    pretrained_model_name_or_path : Union[str, os.PathLike],
    cache_dir : Optional[Union[str, os.PathLike]] = None,
    force_download : bool = False,
    resume_download : bool = False,
    proxies : Optional[Dict[str, str]] = None,
    use_auth_token : Optional[Union[bool, str]] = None,
    revision : Optional[str] = None,
    local_files_only : bool = False,
    **kwargs : Union[str, Any],
):
    """Load the feature-extractor configuration dict for a checkpoint.

    Downloads/locates the feature-extractor JSON file in the repo and returns
    its parsed contents; returns an empty dict (after logging) when the file
    cannot be found, so callers can fall back to the model config.

    FIX: the original signature named every parameter ``_snake_case``
    (a SyntaxError — duplicate argument names), and the body tested
    ``resolved_config_file`` which was never bound. Parameter names are
    restored from the keyword arguments forwarded to ``get_file_from_repo``.

    NOTE(review): this shadows the module-level ``lowerCamelCase`` defined
    just above (another garbling artifact) — confirm the intended public
    names of both functions.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,  # file name constant, presumably imported at the top of this file
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead." )
        return {}
    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class snake_case :
    """Auto factory that instantiates the correct feature-extractor class for
    a checkpoint. Cannot be instantiated directly; use the ``from_pretrained``
    classmethod (as the error message below states).

    FIX: the original block could not even compile — ``from_pretrained`` had
    two parameters with the same name — and almost every local was bound to
    the throwaway name ``lowercase__`` while later lines referenced the
    intended names (``config_dict``, ``feature_extractor_class``, ...). Both
    methods were also named ``_a``, so the second shadowed the first; the
    names ``from_pretrained``/``register`` are restored, matching the
    ``AutoFeatureExtractor.from_pretrained`` reference in the ``__init__``
    error text and the ``FEATURE_EXTRACTOR_MAPPING.register`` call.
    """

    def __init__(self) -> None:
        # Guard against direct instantiation.
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method." )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor registered for this checkpoint.

        Resolution order: explicit ``feature_extractor_type`` in the feature
        extractor config, an ``AutoFeatureExtractor`` entry in ``auto_map``
        (remote code, gated by ``trust_remote_code``), the model config, and
        finally the config-class → extractor mapping.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            # NOTE(review): assumes `PretrainedConfig` is imported at the top
            # of this file (standard in transformers) — confirm.
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs )
            kwargs.pop("code_revision", None)  # consumed by the dynamic-module loader
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
            f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
            f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature-extractor class for a config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 539
| 0
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _A ( __UpperCamelCase ):
def __init__(self , SCREAMING_SNAKE_CASE_="" , SCREAMING_SNAKE_CASE_="train" ) -> Optional[int]:
'''simple docstring'''
assert os.path.isdir(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = []
UpperCamelCase__ = os.listdir(SCREAMING_SNAKE_CASE_ )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
UpperCamelCase__ = os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ):
continue
self.documents.append(SCREAMING_SNAKE_CASE_ )
def __len__(self ) -> List[Any]:
'''simple docstring'''
return len(self.documents )
def __getitem__(self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = self.documents[idx]
UpperCamelCase__ = document_path.split('''/''' )[-1]
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as source:
UpperCamelCase__ = source.read()
UpperCamelCase__ , UpperCamelCase__ = process_story(SCREAMING_SNAKE_CASE_ )
return document_name, story_lines, summary_lines
def __UpperCamelCase ( A ):
    """Split a raw CNN/DM story string into ``(story_lines, summary_lines)``.

    Article lines come before the first ``@highlight`` marker; summary lines
    are everything after, with the markers themselves removed.

    FIX: the original bound every intermediate to the throwaway name
    ``UpperCamelCase__`` while later lines referenced the intended names
    (``nonempty_lines``, ``lines``, ``element``...), so the function raised
    NameError on first use. The parameter name ``A`` is kept for callers.

    NOTE(review): ``_add_missing_period`` is not defined under that name in
    this file (the helper below is garbled to ``__UpperCamelCase``) — confirm.
    """
    raw_story = A  # keep the original parameter name for callers
    nonempty_lines = list(filter(lambda line: len(line) != 0, [line.strip() for line in raw_story.split('''\n''' )] ) )

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith('''@highlight''' ):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith('''@highlight''' ), lines))
    return story_lines, summary_lines
def __UpperCamelCase ( A ):
UpperCamelCase__ = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u2019''', ''')''']
if line.startswith('''@highlight''' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __UpperCamelCase ( A , A , A ):
if len(A ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(A )) )
return sequence
def __UpperCamelCase ( A , A ):
UpperCamelCase__ = torch.ones_like(A )
UpperCamelCase__ = sequence == pad_token_id
UpperCamelCase__ = 0
return mask
def __UpperCamelCase ( A , A , A ):
UpperCamelCase__ = [tokenizer.encode(A ) for line in story_lines]
UpperCamelCase__ = [token for sentence in story_lines_token_ids for token in sentence]
UpperCamelCase__ = [tokenizer.encode(A ) for line in summary_lines]
UpperCamelCase__ = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __UpperCamelCase ( A , A ):
UpperCamelCase__ = []
for sequence in batch:
UpperCamelCase__ = -1
UpperCamelCase__ = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(A )
return torch.tensor(A )
| 415
|
class _A ( __UpperCamelCase ):
pass
class _A ( __UpperCamelCase ):
pass
class _A :
def __init__(self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = [
[],
[],
[],
]
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
try:
if len(self.queues[priority] ) >= 100:
raise OverflowError('''Maximum queue size is 100''' )
self.queues[priority].append(SCREAMING_SNAKE_CASE_ )
except IndexError:
raise ValueError('''Valid priorities are 0, 1, and 2''' )
def _a (self ) -> int:
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('''All queues are empty''' )
def __str__(self ) -> str:
'''simple docstring'''
return "\n".join(F"Priority {i}: {q}" for i, q in enumerate(self.queues ) )
class _A :
def __init__(self ) -> str:
'''simple docstring'''
UpperCamelCase__ = []
def _a (self , SCREAMING_SNAKE_CASE_ ) -> None:
'''simple docstring'''
if len(self.queue ) == 100:
raise OverFlowError('''Maximum queue size is 100''' )
self.queue.append(SCREAMING_SNAKE_CASE_ )
def _a (self ) -> int:
'''simple docstring'''
if not self.queue:
raise UnderFlowError('''The queue is empty''' )
else:
UpperCamelCase__ = min(self.queue )
self.queue.remove(SCREAMING_SNAKE_CASE_ )
return data
def __str__(self ) -> str:
'''simple docstring'''
return str(self.queue )
def __UpperCamelCase ( ):
    """Demo for the fixed-priority queue: enqueue nine items, print the queue,
    then drain it while printing each dequeued value.

    FIX: the original bound the queue instance to the throwaway name
    ``UpperCamelCase__`` and printed the undefined name ``A``, so the whole
    body raised NameError. NOTE(review): ``FixedPriorityQueue`` is not
    defined under that name in the visible file (the class above is garbled
    to ``_A``) — confirm the intended class name.
    """
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10 )
    fpq.enqueue(1, 70 )
    fpq.enqueue(0, 100 )
    fpq.enqueue(2, 1 )
    fpq.enqueue(2, 5 )
    fpq.enqueue(1, 7 )
    fpq.enqueue(2, 4 )
    fpq.enqueue(1, 64 )
    fpq.enqueue(0, 128 )
    print(fpq)
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq)
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
    print(fpq.dequeue() )
def __UpperCamelCase ( ):
    """Demo for the element-priority queue: enqueue nine values, print the
    queue, then drain it while printing each dequeued value.

    FIX: the original bound the queue instance to the throwaway name
    ``UpperCamelCase__`` and printed the undefined name ``A``.
    NOTE(review): ``ElementPriorityQueue`` is not defined under that name in
    the visible file (the class above is garbled to ``_A``) — confirm.
    """
    epq = ElementPriorityQueue()
    epq.enqueue(10 )
    epq.enqueue(70 )
    epq.enqueue(100 )
    epq.enqueue(1 )
    epq.enqueue(5 )
    epq.enqueue(7 )
    epq.enqueue(4 )
    epq.enqueue(64 )
    epq.enqueue(128 )
    print(epq)
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq)
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
    print(epq.dequeue() )
if __name__ == "__main__":
    # NOTE(review): these entry points are not defined under these names in
    # the visible file (both demo functions above are garbled to
    # `__UpperCamelCase`), so this guard currently raises NameError —
    # confirm the intended function names.
    fixed_priority_queue()
    element_priority_queue()
| 415
| 1
|
from collections import deque
from .hash_table import HashTable
class snake_case_ ( HashTable ):
    """Hash table variant whose buckets are deques (separate chaining): each
    slot holds up to ``charge_factor`` values, newest first.

    FIX: the original could not compile (``_set_value`` declared two
    parameters named ``a_``), its base class ``a_`` was undefined (the file
    imports ``HashTable`` above), and every statement bound the throwaway
    name ``a_`` instead of updating ``self.values``/``self._keys``. Method
    names are restored to the parent's protocol, matching the original
    ``super()._collision_resolution`` call in this block; assumes the parent
    ``HashTable`` dispatches to ``_set_value``/``_collision_resolution`` —
    TODO confirm against hash_table.py.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend ``data`` to the bucket at ``key``, creating the deque lazily."""
        self.values[key] = deque([] ) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Average remaining capacity per slot, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values )
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Keep the key while its bucket has room (or empty slots remain);
        otherwise defer to the parent's probing strategy."""
        if not (
            len(self.values[key] ) == self.charge_factor and self.values.count(None ) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
| 721
|
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class snake_case_ ( a_ ,unittest.TestCase ):
    """Unit tests for ``BertJapaneseTokenizer`` word-level tokenization
    (MeCab, Sudachi, Juman++) and the wordpiece sub-tokenizer.

    NOTE(review): this block is machine-garbled and cannot run as tests:
      * the mixin base ``a_`` is undefined (presumably the imported
        ``TokenizerTesterMixin`` — confirm);
      * the three class attributes below are all assigned to the same name
        ``__lowerCAmelCase``, so only the last assignment survives;
      * every method is named ``snake_case_`` — each ``def`` rebinds the same
        attribute (only the last survives), unittest discovers none of them
        (no ``test_`` prefix), and the intended ``setUp`` is never called;
      * locals are bound to ``a_`` while later lines reference the intended
        names (``vocab_tokens``, ``self.vocab_file``, ``tokenizer`` ...),
        and one tuple unpack carries an annotation (a SyntaxError).
    """

    __lowerCAmelCase = BertJapaneseTokenizer  # intended: tokenizer_class — TODO confirm
    __lowerCAmelCase = False
    __lowerCAmelCase = True

    def snake_case_ ( self ):
        # Intended as setUp: writes a small Japanese wordpiece vocab file.
        super().setUp()
        a_ : str = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]
        # NOTE(review): `vocab_tokens` / `self.vocab_file` below were never
        # bound — the two assignments above went to the throwaway name `a_`.
        a_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def snake_case_ ( self , a_ ):
        # Intended: get_input_output_texts — returns undefined locals.
        a_ : Dict = "こんにちは、世界。 \nこんばんは、世界。"
        a_ : int = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def snake_case_ ( self , a_ ):
        # Intended: get_clean_sequence — annotated tuple unpack is a SyntaxError.
        a_ , a_ : List[str] = self.get_input_output_texts(a_ )
        a_ : Tuple = tokenizer.encode(a_ , add_special_tokens=a_ )
        a_ : Dict = tokenizer.decode(a_ , clean_up_tokenization_spaces=a_ )
        return text, ids

    def snake_case_ ( self ):
        pass  # TODO add if relevant

    def snake_case_ ( self ):
        pass  # TODO add if relevant

    def snake_case_ ( self ):
        pass  # TODO add if relevant

    def snake_case_ ( self ):
        # Basic wordpiece tokenization against the fixture vocab.
        a_ : int = self.tokenizer_class(self.vocab_file )
        a_ : List[Any] = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
        self.assertListEqual(a_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )

    def snake_case_ ( self ):
        # MeCab word tokenizer round-trips through pickle.
        a_ : List[str] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
        self.assertIsNotNone(a_ )
        a_ : Union[str, Any] = "こんにちは、世界。\nこんばんは、世界。"
        a_ : Union[str, Any] = tokenizer.tokenize(a_ )
        self.assertListEqual(a_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        a_ : Any = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(a_ , "wb" ) as handle:
            pickle.dump(a_ , a_ )
        with open(a_ , "rb" ) as handle:
            a_ : Optional[Any] = pickle.load(a_ )
        a_ : int = tokenizer_new.tokenize(a_ )
        self.assertListEqual(a_ , a_ )

    def snake_case_ ( self ):
        # MeCab with the ipadic dictionary.
        a_ : Tuple = MecabTokenizer(mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def snake_case_ ( self ):
        # MeCab with unidic_lite, skipped when the package is absent.
        try:
            a_ : Dict = MecabTokenizer(mecab_dic="unidic_lite" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def snake_case_ ( self ):
        # MeCab with unidic, skipped when the package is absent.
        try:
            a_ : List[Any] = MecabTokenizer(mecab_dic="unidic" )
        except ModuleNotFoundError:
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def snake_case_ ( self ):
        # MeCab with lowercasing enabled.
        a_ : List[str] = MecabTokenizer(do_lower_case=a_ , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    def snake_case_ ( self ):
        # MeCab with an explicit jumandic option, skipped when the dict is absent.
        try:
            a_ : int = MecabTokenizer(
                do_lower_case=a_ , normalize_text=a_ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )

    def snake_case_ ( self ):
        # MeCab with text normalization disabled.
        a_ : List[Any] = MecabTokenizer(normalize_text=a_ , mecab_dic="ipadic" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )

    @require_sudachi
    def snake_case_ ( self ):
        # Sudachi word tokenizer round-trips through pickle.
        a_ : int = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
        self.assertIsNotNone(a_ )
        a_ : Optional[Any] = "こんにちは、世界。\nこんばんは、世界。"
        a_ : str = tokenizer.tokenize(a_ )
        self.assertListEqual(a_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        a_ : Dict = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(a_ , "wb" ) as handle:
            pickle.dump(a_ , a_ )
        with open(a_ , "rb" ) as handle:
            a_ : Optional[Any] = pickle.load(a_ )
        a_ : int = tokenizer_new.tokenize(a_ )
        self.assertListEqual(a_ , a_ )

    @require_sudachi
    def snake_case_ ( self ):
        # Sudachi keeps whitespace tokens by default.
        a_ : int = SudachiTokenizer(sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def snake_case_ ( self ):
        # Sudachi split mode A: finest segmentation.
        a_ : Any = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )

    @require_sudachi
    def snake_case_ ( self ):
        # Sudachi split mode B: intermediate segmentation.
        a_ : Union[str, Any] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )

    @require_sudachi
    def snake_case_ ( self ):
        # Sudachi split mode C: coarsest segmentation.
        a_ : int = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
        self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )

    @require_sudachi
    def snake_case_ ( self ):
        # Sudachi with lowercasing enabled.
        a_ : List[str] = SudachiTokenizer(do_lower_case=a_ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )

    @require_sudachi
    def snake_case_ ( self ):
        # Sudachi with normalization disabled.
        a_ : str = SudachiTokenizer(normalize_text=a_ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )

    @require_sudachi
    def snake_case_ ( self ):
        # Sudachi with whitespace trimming enabled.
        a_ : Optional[int] = SudachiTokenizer(trim_whitespace=a_ , sudachi_dict_type="core" )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )

    @require_jumanpp
    def snake_case_ ( self ):
        # Juman++ word tokenizer round-trips through pickle.
        a_ : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
        self.assertIsNotNone(a_ )
        a_ : Optional[int] = "こんにちは、世界。\nこんばんは、世界。"
        a_ : Any = tokenizer.tokenize(a_ )
        self.assertListEqual(a_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
        a_ : int = os.path.join(self.tmpdirname , "tokenizer.bin" )
        with open(a_ , "wb" ) as handle:
            pickle.dump(a_ , a_ )
        with open(a_ , "rb" ) as handle:
            a_ : Tuple = pickle.load(a_ )
        a_ : Union[str, Any] = tokenizer_new.tokenize(a_ )
        self.assertListEqual(a_ , a_ )

    @require_jumanpp
    def snake_case_ ( self ):
        # Juman++ default behaviour keeps ideographic spaces.
        a_ : Any = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def snake_case_ ( self ):
        # Juman++ with lowercasing enabled.
        a_ : Optional[Any] = JumanppTokenizer(do_lower_case=a_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def snake_case_ ( self ):
        # Juman++ with normalization disabled (half-width katakana preserved).
        a_ : int = JumanppTokenizer(normalize_text=a_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )

    @require_jumanpp
    def snake_case_ ( self ):
        # Juman++ with whitespace trimming enabled.
        a_ : Optional[Any] = JumanppTokenizer(trim_whitespace=a_ )
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )

    @require_jumanpp
    def snake_case_ ( self ):
        # Juman++ keeps emoticon-like runs as single tokens.
        a_ : Any = JumanppTokenizer()
        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )

    def snake_case_ ( self ):
        # WordpieceTokenizer in isolation against a tiny vocab.
        a_ : str = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        a_ : int = {}
        for i, token in enumerate(a_ ):
            a_ : Optional[int] = i
        a_ : Optional[int] = WordpieceTokenizer(vocab=a_ , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )

    def snake_case_ ( self ):
        # Sentencepiece-backed subword tokenizer of a hub checkpoint (network).
        a_ : Union[str, Any] = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
        a_ : Any = tokenizer.subword_tokenizer
        a_ : Union[str, Any] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
        self.assertListEqual(a_ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
        a_ : int = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
        self.assertListEqual(a_ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )

    def snake_case_ ( self ):
        # build_inputs_with_special_tokens: [CLS]=2, [SEP]=3 framing.
        a_ : List[str] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
        a_ : Any = tokenizer.encode("ありがとう。" , add_special_tokens=a_ )
        a_ : Optional[Any] = tokenizer.encode("どういたしまして。" , add_special_tokens=a_ )
        a_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a_ )
        a_ : Tuple = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case_ ( a_ ,unittest.TestCase ):
    """Unit tests for ``BertJapaneseTokenizer`` with the *character* subword
    tokenizer.

    NOTE(review): garbled like the class above — base ``a_`` undefined,
    both class attributes assigned to the same name ``__lowerCAmelCase``,
    every method named ``snake_case_`` (mutual shadowing, undiscoverable by
    unittest), and locals bound to ``a_`` while later lines reference the
    intended names (``vocab_tokens``, ``self.vocab_file`` ...).
    """

    __lowerCAmelCase = BertJapaneseTokenizer  # intended: tokenizer_class — TODO confirm
    __lowerCAmelCase = False

    def snake_case_ ( self ):
        # Intended as setUp: writes a character-level vocab file.
        super().setUp()
        a_ : Any = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        a_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )

    def snake_case_ ( self , **a_ ):
        # Intended: get_tokenizer helper for the character subword mode.
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **a_ )

    def snake_case_ ( self , a_ ):
        # Intended: get_input_output_texts — returns undefined locals.
        a_ : int = "こんにちは、世界。 \nこんばんは、世界。"
        a_ : List[str] = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text

    def snake_case_ ( self ):
        pass  # TODO add if relevant

    def snake_case_ ( self ):
        pass  # TODO add if relevant

    def snake_case_ ( self ):
        pass  # TODO add if relevant

    def snake_case_ ( self ):
        # Character-level tokenization against the fixture vocab.
        a_ : Dict = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
        a_ : int = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
        self.assertListEqual(
            a_ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(a_ ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )

    def snake_case_ ( self ):
        # CharacterTokenizer in isolation; unknown chars map to [UNK].
        a_ : Union[str, Any] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        a_ : Any = {}
        for i, token in enumerate(a_ ):
            a_ : str = i
        a_ : List[str] = CharacterTokenizer(vocab=a_ , unk_token="[UNK]" )
        self.assertListEqual(tokenizer.tokenize("" ) , [] )
        self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
        self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )

    def snake_case_ ( self ):
        # build_inputs_with_special_tokens: [CLS]=2, [SEP]=3 framing.
        a_ : str = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
        a_ : Optional[int] = tokenizer.encode("ありがとう。" , add_special_tokens=a_ )
        a_ : Tuple = tokenizer.encode("どういたしまして。" , add_special_tokens=a_ )
        a_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(a_ )
        a_ : Any = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class snake_case_ ( unittest.TestCase ):
    """Checks that AutoTokenizer resolves the Japanese checkpoint to the
    expected tokenizer class (requires network access to the hub).

    NOTE(review): the method name lacks the ``test_`` prefix, so unittest
    will not discover it; the garbled locals make ``assertIsInstance``
    compare the checkpoint-name string against itself rather than the
    tokenizer against ``BertJapaneseTokenizer`` — confirm the intended
    assertion.
    """

    def snake_case_ ( self ):
        a_ : List[Any] = "cl-tohoku/bert-base-japanese"
        a_ : Dict = AutoTokenizer.from_pretrained(a_ )
        self.assertIsInstance(a_ , a_ )
class snake_case_ ( unittest.TestCase ):
    """Checks the warning emitted when a tokenizer class is loaded from a
    checkpoint saved with a different tokenizer type (requires network).

    NOTE(review): garbled — this class shadows the identically named class
    above, and the method name lacks the ``test_`` prefix so unittest will
    not discover it.
    """

    def snake_case_ ( self ):
        # Loading a Japanese checkpoint with plain BertTokenizer warns...
        a_ : Any = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertTokenizer.from_pretrained(a_ )
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from." ) )
        # ...and vice versa for an English checkpoint with BertJapaneseTokenizer.
        a_ : Any = "bert-base-cased"
        with self.assertLogs("transformers" , level="WARNING" ) as cm:
            BertJapaneseTokenizer.from_pretrained(a_ )
        self.assertTrue(
            cm.records[0].message.startswith(
                "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                " is called from." ) )
| 370
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
# Lazy-module bootstrap for the ByT5 tokenizer package.
# FIX: the structure dict was bound to the throwaway name `__lowerCamelCase`
# while `_LazyModule` below consumed the undefined name `_import_structure`,
# and the lazy module was assigned to a variable instead of being installed
# in `sys.modules` (the standard transformers pattern). The old name is kept
# as an alias for backward compatibility.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
__lowerCamelCase = _import_structure

if TYPE_CHECKING:
    from .tokenization_byta import ByTaTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 323
|
from manim import *
class __magic_name__ ( A__ ):
    """Manim scene animating an empty model skeleton being loaded into memory
    (CPU / GPU / model rectangles), in the style of the Accelerate docs.

    NOTE(review): this block is machine-garbled and cannot run: the base
    class ``A__`` is undefined (presumably ``Scene`` from manim — confirm),
    and most locals below are bound to ``UpperCAmelCase`` while later lines
    reference the intended names (``cpu``, ``gpu``, ``model``, ``key``,
    ``key_text``, ``step_a``, ``cpu_left_col_base``, ``cpu_targs``,
    ``first_animations``, ``second_animations``) which are never bound.
    """

    def SCREAMING_SNAKE_CASE_ ( self : Any ) -> int:
        """Build the CPU/GPU/model layout, then animate highlighting each CPU
        cell and moving a copy toward the left column."""
        # Base memory-cell rectangles used as templates.
        UpperCAmelCase = Rectangle(height=0.5 , width=0.5 )
        UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
        # CPU: two columns of six cells with a label.
        UpperCAmelCase = [mem.copy() for i in range(6 )]
        UpperCAmelCase = [mem.copy() for i in range(6 )]
        UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        UpperCAmelCase = VGroup(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        UpperCAmelCase = Text("CPU" , font_size=24 )
        UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
        cpu.move_to([-2.5, -0.5, 0] )
        self.add(UpperCamelCase__ )
        # GPU: a single cell with a label, offset from the CPU group.
        UpperCAmelCase = [mem.copy() for i in range(1 )]
        UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        UpperCAmelCase = Text("GPU" , font_size=24 )
        UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
        gpu.align_to(UpperCamelCase__ , UpperCamelCase__ )
        gpu.set_x(gpu.get_x() - 1 )
        self.add(UpperCamelCase__ )
        # Model: a row of six cells with a label.
        UpperCAmelCase = [mem.copy() for i in range(6 )]
        UpperCAmelCase = VGroup(*UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0 )
        UpperCAmelCase = Text("Model" , font_size=24 )
        UpperCAmelCase = Group(UpperCamelCase__ , UpperCamelCase__ ).arrange(UpperCamelCase__ , buff=0.5 , aligned_edge=UpperCamelCase__ )
        model.move_to([3, -1.0, 0] )
        self.play(
            Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , Create(UpperCamelCase__ , run_time=1 ) , )
        # Caption and legend for the "empty model" step.
        UpperCAmelCase = MarkupText(
            F'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=24 , )
        UpperCAmelCase = Square(side_length=2.2 )
        key.move_to([-5, 2, 0] )
        UpperCAmelCase = MarkupText(
            F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
        key_text.move_to([-5, 2.4, 0] )
        step_a.move_to([2, 2, 0] )
        self.play(Write(UpperCamelCase__ , run_time=2.5 ) , Write(UpperCamelCase__ ) , Write(UpperCamelCase__ ) )
        self.add(UpperCamelCase__ )
        # For each CPU cell: highlight it, and animate a filled copy moving
        # into the left column (positions computed per index).
        UpperCAmelCase = []
        UpperCAmelCase = []
        UpperCAmelCase = []
        for i, rect in enumerate(UpperCamelCase__ ):
            UpperCAmelCase = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(UpperCamelCase__ , opacity=0.7 )
            cpu_target.move_to(UpperCamelCase__ )
            cpu_target.generate_target()
            UpperCAmelCase = 0.46 / 4
            UpperCAmelCase = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=UpperCamelCase__ )
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target , direction=UpperCamelCase__ , buff=0.0 )
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target , direction=UpperCamelCase__ , buff=0.0 )
            cpu_targs.append(UpperCamelCase__ )
            first_animations.append(rect.animate(run_time=0.5 ).set_stroke(UpperCamelCase__ ) )
            second_animations.append(MoveToTarget(UpperCamelCase__ , run_time=1.5 ) )
        self.play(*UpperCamelCase__ )
        self.play(*UpperCamelCase__ )
        self.wait()
| 323
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    """Convert an original (studio-ousia) mLUKE checkpoint into a Hugging Face
    ``LukeForMaskedLM`` model plus an ``MLukeTokenizer``, verify reference
    activations, and save both to ``pytorch_dump_folder_path``.

    NOTE(review): the original definition reused ``_lowercase`` for all five
    parameters (a SyntaxError) and was named ``lowerCamelCase__`` while the
    ``__main__`` block calls ``convert_luke_checkpoint`` -- both restored here.

    Args:
        checkpoint_path: path to the original ``pytorch_model.bin``.
        metadata_path: path to the ``metadata.json`` describing the model.
        entity_vocab_path: path to the original entity vocabulary (JSON lines).
        pytorch_dump_folder_path: output directory for model and tokenizer.
        model_size: "base" or "large"; only "base" has reference values wired in.

    Raises:
        ValueError: if loaded weights or reference activations do not match.
        NotImplementedError: for ``model_size == "large"`` (no reference values).
    """
    # Load the architecture hyper-parameters defined in the metadata file.
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    # use_entity_aware_attention=True per the upstream mLUKE conversion script.
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path.
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file and add an entry for [MASK2].
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks.
    entity_token_a = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_b = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_b]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    # Re-tag the saved tokenizer as an MLukeTokenizer and store the entity vocab.
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)
    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens from "@" and "#".
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    enta_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    enta_emb = word_emb[enta_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, enta_emb])
    # Extend the LM head biases the same way.
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        enta_decoder_bias = decoder_bias[enta_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # from the plain word-to-word query weights.
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK]
    # entity for downstream tasks.
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # Add [MASK2] for 'entity_predictions.bias'.
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    # Tied / re-derived weights are not loaded from the checkpoint.
    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    # BUGFIX: backbone weights must be re-keyed under the "luke." prefix. The
    # original if/else assigned the same value in both branches, so the prefix
    # was never applied and load_state_dict could not match the backbone keys
    # (the strict key checks below expect "luke."-prefixed names).
    hf_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            hf_state_dict["luke." + key] = value
        else:
            hf_state_dict[key] = value

    missing_keys, unexpected_keys = model.load_state_dict(hf_state_dict, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs on a reference sentence with an entity span.
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)

    # Verify word hidden states.
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError

    # Verify entity hidden states.
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1E-4):
        raise ValueError

    # Verify masked word/entity prediction.
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")
    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer.
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    """Read the original mLUKE entity vocabulary (one JSON object per line).

    Each line holds ``{"id": int, "entities": [[name, language], ...]}``.
    Special tokens are mapped once by their bare name; every other entity is
    keyed as ``"{language}:{name}"``.

    BUGFIX: the original body referenced ``SPECIAL_TOKENS`` while its
    assignment had been renamed away (NameError); the constant is restored
    here. The file handle is now also closed via a context manager, and the
    function name is restored from the call site in the converter above.

    Args:
        entity_vocab_path: path to the JSON-lines entity vocabulary file.

    Returns:
        dict mapping entity key -> integer entity id.
    """
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    with open(entity_vocab_path) as f:
        data = [json.loads(line) for line in f]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            # A special token gets a single, language-free entry.
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    # Command-line entry point: convert an original mLUKE checkpoint into a
    # Hugging Face model + tokenizer dump.
    # BUGFIX: the original assigned the parser and the parsed args to ``__a``
    # but then read ``parser`` / ``args`` (NameError at runtime).
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
| 300
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    """Builds a small ``FlaubertConfig`` plus matching random dummy inputs and
    runs shape checks for each TFFlaubert* head.

    NOTE(review): the original block named this class ``__a``, every method
    ``a__``, and reused ``_SCREAMING_SNAKE_CASE`` for all parameters (a
    SyntaxError). The class name is restored from the ``setUp`` call site
    below; method names from their call sites; parameter names from the
    ``prepare_config_and_inputs`` return-tuple order.
    """

    def __init__(self, parent):
        # ``parent`` is the unittest.TestCase that owns the assertions.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        """Create random dummy inputs and a small config for one test run."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Base model: check dict- and list-style inputs, last_hidden_state shape."""
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """LM head: logits over the vocabulary."""
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Simple QA head: start/end logits per token."""
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Sequence classification head: one logit row per example."""
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Token classification head: per-token label logits."""
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        """Multiple-choice head: inputs tiled across num_choices."""
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class __a(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite wiring for the TFFlaubert models.

    NOTE(review): the original block obfuscated the base classes to ``_a``
    (undefined), named every method ``a__`` (so only the last survived), and
    reused ``_SCREAMING_SNAKE_CASE`` for parameters (a SyntaxError). The base
    classes are restored from this file's imports; method names are restored
    from the unittest/mixin conventions they must satisfy (``setUp``,
    ``test_*``) -- confirm the exact ``test_*`` names against upstream.
    """

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """Skip QA pipeline tests for slow tokenizers (see inline comment)."""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class __a(unittest.TestCase):
    """Slow integration test that checks real activations of a pretrained
    TFFlaubert checkpoint.

    NOTE(review): the method was originally named ``a__`` and would never be
    discovered by unittest; a ``test_*`` name is restored here.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 300
| 1
|
class __A:
    """Fenwick (binary indexed) tree specialised for range-maximum queries.

    ``arr`` holds the raw values; ``tree[i]`` caches the maximum of the
    segment ``[get_prev(i) + 1, i]``. ``update`` and ``query`` run in
    O(log^2 n). Values are assumed non-negative (``query`` starts from 0).

    NOTE(review): all four methods were originally named ``A__`` (so only the
    last survived and the internal ``self.get_prev``/``self.get_next`` calls
    broke); the names are restored from those internal call sites.
    """

    def __init__(self, size):
        """Create a tree over ``size`` slots, all initialised to 0."""
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index):
        """Index of the next cache node whose segment covers ``index``."""
        return index | (index + 1)

    @staticmethod
    def get_prev(index):
        """Index just before the start of the segment cached at ``index``."""
        return (index & (index + 1)) - 1

    def update(self, index, value):
        """Set ``arr[index] = value`` and refresh every affected cache node."""
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                # The cached segment is the single slot ``index``.
                self.tree[index] = value
            else:
                # BUGFIX: recompute the segment maximum with a range query.
                # The previous code was ``max(value, current_left_border,
                # index)`` -- the max of a value and two *indices* -- which
                # produced wrong cached maxima.
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left, right):
        """Return ``max(arr[left:right])`` -- ``right`` is exclusive."""
        right -= 1  # Because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                # The whole cached segment ending at ``right`` fits the range.
                result = max(result, self.tree[right])
                right = current_left
            else:
                # Segment would overshoot ``left``; take the raw value instead.
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 21
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self: int , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: int=13 , __lowerCAmelCase: Any=7 , __lowerCAmelCase: List[Any]=True , __lowerCAmelCase: Dict=True , __lowerCAmelCase: Union[str, Any]=True , __lowerCAmelCase: List[Any]=True , __lowerCAmelCase: int=99 , __lowerCAmelCase: Dict=64 , __lowerCAmelCase: Optional[Any]=32 , __lowerCAmelCase: Tuple=5 , __lowerCAmelCase: List[str]=4 , __lowerCAmelCase: Tuple=37 , __lowerCAmelCase: Any="gelu" , __lowerCAmelCase: Union[str, Any]=0.1 , __lowerCAmelCase: List[Any]=0.1 , __lowerCAmelCase: int=512 , __lowerCAmelCase: Union[str, Any]=16 , __lowerCAmelCase: Dict=2 , __lowerCAmelCase: Tuple=0.02 , __lowerCAmelCase: Dict=3 , __lowerCAmelCase: Optional[int]=4 , __lowerCAmelCase: Union[str, Any]=None , ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = embedding_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
def _UpperCAmelCase ( self: Dict ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: Any , __lowerCAmelCase: Dict , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: int ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = MegatronBertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
__UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
__UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: int , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: List[str] , __lowerCAmelCase: str , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: Dict , __lowerCAmelCase: Optional[int] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self: Union[str, Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: str , __lowerCAmelCase: Dict , __lowerCAmelCase: Tuple , __lowerCAmelCase: Tuple ) -> Any:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self: List[str] , __lowerCAmelCase: Any , __lowerCAmelCase: Tuple , __lowerCAmelCase: Any , __lowerCAmelCase: Any , __lowerCAmelCase: Dict , __lowerCAmelCase: Dict , __lowerCAmelCase: Optional[int] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForNextSentencePrediction(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: str , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Tuple , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: int , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: Dict ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForPreTraining(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _UpperCAmelCase ( self: List[str] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: List[str] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: int , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Tuple ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self: Any , __lowerCAmelCase: int , __lowerCAmelCase: Tuple , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: int , __lowerCAmelCase: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = MegatronBertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self: str , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Tuple , __lowerCAmelCase: str , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Dict , __lowerCAmelCase: Tuple , __lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = MegatronBertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self: Any , __lowerCAmelCase: Dict , __lowerCAmelCase: Tuple , __lowerCAmelCase: Tuple , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: str , __lowerCAmelCase: int ) -> Any:
    # NOTE(review): duplicate parameter names -- SyntaxError; presumed
    # signature as in the other create_and_check helpers of this tester.
    # ``input_ids``/``token_type_ids``/``input_mask``/``model``/``result``
    # are never bound below (assignments go to ``__UpperCAmelCase``).
    '''Builds MegatronBertForMultipleChoice: tiles inputs to num_choices and checks logits.'''
    __UpperCAmelCase = self.num_choices
    __UpperCAmelCase = MegatronBertForMultipleChoice(config=__lowerCAmelCase )
    model.to(__lowerCAmelCase )
    model.eval()
    # Expand (batch, seq) tensors to (batch, num_choices, seq) for the MC head.
    __UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    __UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    __UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
    __UpperCAmelCase = model(
        __lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
    self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self: Dict ) -> List[str]:
    """Build the common ``(config, inputs_dict)`` pair used by the shared tests.

    Returns:
        ``(config, inputs_dict)`` where ``inputs_dict`` holds the
        ``input_ids``/``token_type_ids``/``attention_mask`` tensors.
    """
    # The original obfuscated code unpacked every element of the 7-tuple into
    # the *same* name and then referenced ``config``/``input_ids``/... which
    # were never bound (NameError).  Presumed tuple order below follows the
    # standard BERT-style tester -- TODO confirm against prepare_config_and_inputs.
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,  # unused by the common tests
        token_labels,  # unused
        choice_labels,  # unused
    ) = self.prepare_config_and_inputs()
    inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
    return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( snake_case , snake_case , unittest.TestCase ):
    """Common model/pipeline test suite for MegatronBERT.

    NOTE(review): the two mixin bases were obfuscated to ``snake_case``
    (presumably ModelTesterMixin and PipelineTesterMixin -- confirm), and the
    class attributes below all share the name ``lowerCAmelCase__`` so only the
    last binding survives at runtime.
    """

    # Presumably ``all_model_classes``.
    lowerCAmelCase__ : str = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    # Presumably ``pipeline_model_mapping``.
    lowerCAmelCase__ : Optional[Any] = (
        {
            'feature-extraction': MegatronBertModel,
            'fill-mask': MegatronBertForMaskedLM,
            'question-answering': MegatronBertForQuestionAnswering,
            'text-classification': MegatronBertForSequenceClassification,
            'text-generation': MegatronBertForCausalLM,
            'token-classification': MegatronBertForTokenClassification,
            'zero-shot': MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): two boolean flags, names collapsed into the same attribute.
    lowerCAmelCase__ : Optional[int] = True
    # test_resize_embeddings = False
    lowerCAmelCase__ : Any = False

    def _UpperCAmelCase ( self: int , __lowerCAmelCase: Any , __lowerCAmelCase: str , __lowerCAmelCase: int=False ) -> str:
        # NOTE(review): duplicate parameter names -- SyntaxError.  Presumed:
        # (self, inputs_dict, model_class, return_labels=False); the body
        # references those intended names (``return_labels``, ``model_class``,
        # ``inputs_dict``) which are unbound as written.
        '''Adds dummy label tensors to the inputs when the test asks for labels.'''
        __UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
        if return_labels:
            if model_class in get_values(__lowerCAmelCase ):
                __UpperCAmelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
                __UpperCAmelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
        return inputs_dict

    def _UpperCAmelCase ( self: int ) -> Any:
        '''Instantiates the model tester and the config tester.'''
        __UpperCAmelCase = MegatronBertModelTester(self )
        __UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )

    def _UpperCAmelCase ( self: List[Any] ) -> List[Any]:
        '''Runs the shared config sanity checks.'''
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self: List[Any] ) -> Optional[int]:
        '''Checks the base MegatronBertModel.'''
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*__lowerCAmelCase )

    def _UpperCAmelCase ( self: Union[str, Any] ) -> Optional[Any]:
        '''Checks the masked-LM head.'''
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowerCAmelCase )

    def _UpperCAmelCase ( self: str ) -> Dict:
        '''Checks the multiple-choice head.'''
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowerCAmelCase )

    def _UpperCAmelCase ( self: Tuple ) -> List[Any]:
        '''Checks the next-sentence-prediction head.'''
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowerCAmelCase )

    def _UpperCAmelCase ( self: List[str] ) -> Dict:
        '''Checks the pretraining heads.'''
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowerCAmelCase )

    def _UpperCAmelCase ( self: Union[str, Any] ) -> Tuple:
        '''Checks the question-answering head.'''
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowerCAmelCase )

    def _UpperCAmelCase ( self: Optional[int] ) -> Any:
        '''Checks the sequence-classification head.'''
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowerCAmelCase )

    def _UpperCAmelCase ( self: int ) -> Tuple:
        '''Checks the token-classification head.'''
        __UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowerCAmelCase )
def __lowerCAmelCase ( A_ : List[str] ) -> List[Any]:
    """Wrap a nested list of token ids in a ``torch.long`` tensor.

    The obfuscated original also passed ``device=A_`` -- i.e. the token list
    itself -- which raises a TypeError.  The upstream helper used a
    module-level ``torch_device`` that is not in scope here, so the tensor is
    built on the default device instead.
    """
    return torch.tensor(A_ , dtype=torch.long )
# Relative/absolute tolerance for the elementwise comparison in the
# integration test below (used as both rel_tol and abs_tol of math.isclose).
a_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
    """Slow integration test comparing Megatron-BERT activations to reference values."""

    @slow
    @unittest.skip("Model is not available." )
    def _UpperCAmelCase ( self: List[Any] ) -> int:
        """Run the 345M checkpoint and compare a 3x3 slice of the final hidden state.

        The obfuscated original collapsed every local into one name, so the
        final ``math.isclose`` compared the *message string* with itself and the
        helper/ device globals it referenced (``_long_tensor``, the target
        device) did not exist.  Distinct locals are restored below.
        """
        checkpoint = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            checkpoint = os.path.join(os.environ["MYDIR"] , checkpoint )
        model = MegatronBertModel.from_pretrained(checkpoint )
        # The original moved the model to an out-of-scope ``torch_device``
        # global; pick the device locally instead -- TODO confirm this matches
        # the surrounding test harness convention.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model.to(device )
        model.half()
        input_ids = torch.tensor(
            [[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] , dtype=torch.long , device=device )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 1_024) )
        self.assertEqual(output.shape , expected_shape )
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii , jj , a , b )
                # ``a_`` is the module-level tolerance defined above this class.
                self.assertTrue(math.isclose(a , b , rel_tol=a_ , abs_tol=a_ ) , msg=msg )
| 221
| 0
|
'''simple docstring'''
from math import asin, atan, cos, radians, sin, sqrt, tan
# WGS-84 ellipsoid parameters in metres.  NOTE(review): all three constants
# are bound to the same obfuscated name, so only the last value (6378137)
# survives; they were presumably AXIS_A (semi-major axis), AXIS_B (semi-minor
# axis) and RADIUS (equatorial radius) -- the function below expects those names.
__snake_case: Optional[int] = 6_37_81_37.0
__snake_case: Tuple = 6_35_67_52.31_42_45
__snake_case: Tuple = 6_37_81_37
def _snake_case ( lat1: float , lon1: float , lat2: float , lon2: float ) -> float:
    """Haversine distance in metres between two (latitude, longitude) points.

    Latitudes (degrees) are first reduced onto the WGS-84 auxiliary sphere via
    the ellipsoid flattening, then the classic haversine formula is applied.

    The obfuscated original repeated one parameter name four times (a
    SyntaxError) and referenced undefined ``AXIS_A``/``AXIS_B``/``RADIUS``
    globals; distinct parameters are restored and the WGS-84 constants are
    defined locally so the function is self-contained.
    """
    AXIS_A = 6_378_137.0  # WGS-84 semi-major axis (m)
    AXIS_B = 6_356_752.314245  # WGS-84 semi-minor axis (m)
    RADIUS = 6_378_137  # equatorial radius used for the arc length (m)
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Reduced (parametric) latitudes on the auxiliary sphere.
    phi_1 = atan((1 - flattening) * tan(radians(lat1 ) ) )
    phi_2 = atan((1 - flattening) * tan(radians(lat2 ) ) )
    lambda_1 = radians(lon1 )
    lambda_2 = radians(lon2 )
    # Haversine half-angle terms, then squared.
    sin_sq_phi = sin((phi_2 - phi_1) / 2 )
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2 )
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_2 ) * cos(phi_1 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
    # Execute the module's doctests when run as a script.
    import doctest

    doctest.testmod()
| 716
|
'''simple docstring'''
from __future__ import annotations
def _snake_case ( A_ : int ) -> list[int]:
    """Return the prime factorisation of ``A_`` in non-decreasing order.

    Trial division: each factor ``i`` is stripped out completely before moving
    on, so every appended value is prime.  Returns ``[]`` for 0 and 1.

    The obfuscated original bound ``i`` and ``factors`` to a single collapsed
    name, never copied the argument into ``n``, and appended the raw argument
    instead of the divisor -- all restored below.
    """
    n = A_
    i = 2
    factors: list[int] = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        factors.append(n )
    return factors
if __name__ == "__main__":
    # Execute the module's doctests when run as a script.
    import doctest

    doctest.testmod()
| 460
| 0
|
"""simple docstring"""
def __magic_name__ ( __snake_case : str ) -> list:
    """Return every variant of ``__snake_case`` with exactly one alphabetic
    character upper-cased (one variant per alphabetic position, in order).

    The original comprehension referenced an undefined name ``txt``; the
    parameter is bound to ``txt`` locally so the expression works as intended.
    """
    txt = __snake_case
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt ) )
        if txt[a].isalpha()
    ]
if __name__ == "__main__":
    # Execute the module's doctests when run as a script.
    __import__("""doctest""").testmod()
| 361
|
"""simple docstring"""
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
# Module-level logger for the dummy-data manager below.
_A : List[str] = get_logger(__name__)
class a__ :
    """Redirects dataset download/extract calls to local dummy-data fixtures.

    NOTE(review): this class is structurally broken by the obfuscation pass:
    * several signatures repeat the parameter name ``_a`` (including ``_a``
      next to ``*_a``), which is a SyntaxError in Python;
    * most methods share the name ``__magic_name__``, so each definition
      silently clobbers the previous one and only the last binding survives;
    * many assignments that were evidently ``self.attr = ...`` now go to a
      throwaway local ``lowercase``, leaving the referenced attributes unbound.
    The comments below record what each method evidently does so the original
    (datasets MockDownloadManager-style) API can be restored.
    """

    # NOTE(review): three class attributes share one name; only the last
    # binding (False) survives.  They look like the dummy-data folder name,
    # the scripts directory name, and a boolean flag.
    __lowerCAmelCase = """dummy_data"""
    __lowerCAmelCase = """datasets"""
    __lowerCAmelCase = False

    def __init__( self , _a , _a , _a , _a = None , _a = False , _a = True , _a = None , ):
        # Presumed params: dataset_name, config, version_name, cache_dir,
        # use_local_dummy_data, load_existing_dummy_data, download_callbacks.
        # NOTE(review): the body references those intended names, which are
        # unbound given the collapsed ``_a`` parameters.
        lowercase : int = 0
        lowercase : Optional[Any] = dataset_name
        lowercase : List[str] = cache_dir
        lowercase : Union[str, Any] = use_local_dummy_data
        lowercase : str = config
        # download_callbacks take a single url as input
        lowercase : List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        lowercase : List[Any] = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        lowercase : Tuple = str(_a )
        # to be downloaded
        lowercase : Tuple = None
        lowercase : List[Any] = None

    @property
    def __magic_name__ ( self ):
        # Lazily downloads/extracts the dummy data and caches its local path.
        if self._dummy_file is None:
            lowercase : Optional[int] = self.download_dummy_data()
        return self._dummy_file

    @property
    def __magic_name__ ( self ):
        # Relative folder of the dummy fixtures: dummy[/config_name]/version_name.
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy" , self.config.name , self.version_name )
        # structure is dummy / version_name
        return os.path.join("dummy" , self.version_name )

    @property
    def __magic_name__ ( self ):
        # Path of the fixture archive inside the dummy-data folder.
        return os.path.join(self.dummy_data_folder , "dummy_data.zip" )

    def __magic_name__ ( self ):
        # Fetches (or reuses) the dummy_data.zip, extracting it via cached_path.
        lowercase : Optional[Any] = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        lowercase : str = cached_path(
            _a , cache_dir=self.cache_dir , extract_compressed_file=_a , force_extract=_a )
        # NOTE(review): ``_a`` is not a parameter of this method -- unbound here.
        return os.path.join(_a , self.dummy_file_name )

    @property
    def __magic_name__ ( self ):
        # Local path to the committed dummy_data.zip in the scripts tree.
        return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )

    @property
    def __magic_name__ ( self ):
        # Lazily resolves the GitHub URL of the dummy_data.zip.
        if self._bucket_url is None:
            lowercase : Dict = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
        return self._bucket_url

    @property
    def __magic_name__ ( self ):
        # Directory containing the dummy data (dir itself, or its parent for a file).
        # return full path if its a dir
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )

    def __magic_name__ ( self , _a , *_a ):
        # download_and_extract replacement: maps a url / list / dict of urls to
        # the corresponding dummy-data paths.
        # NOTE(review): ``_a`` next to ``*_a`` is a SyntaxError.
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            lowercase : Optional[int] = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            lowercase : Optional[Any] = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(_a , _a ):
            return self.create_dummy_data_dict(_a , _a )
        elif isinstance(_a , (list, tuple) ):
            return self.create_dummy_data_list(_a , _a )
        else:
            return self.create_dummy_data_single(_a , _a )

    def __magic_name__ ( self , _a , *_a ):
        # download replacement -- delegates to download_and_extract.
        return self.download_and_extract(_a )

    def __magic_name__ ( self , _a , _a ):
        # extract replacement -- delegates to download_and_extract.
        return self.download_and_extract(_a )

    def __magic_name__ ( self , _a , *_a , **_a ):
        # get_recorded_sizes_checksums-style no-op: returns the path unchanged.
        return path

    def __magic_name__ ( self ):
        # No recorded sizes/checksums for dummy data.
        return {}

    def __magic_name__ ( self , _a , _a ):
        # Maps a dict of urls to dummy paths; keys keep the last url component.
        lowercase : List[str] = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(_a , _a ):
                    for single_url in single_urls:
                        download_callback(_a )
                else:
                    lowercase : Union[str, Any] = single_urls
                    download_callback(_a )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(_a , _a ):
                lowercase : Any = [os.path.join(_a , urllib.parse.quote_plus(Path(_a ).name ) ) for x in single_urls]
            else:
                lowercase : int = single_urls
                lowercase : Tuple = os.path.join(_a , urllib.parse.quote_plus(Path(_a ).name ) )
            lowercase : List[str] = value
        # make sure that values are unique
        if all(isinstance(_a , _a ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict

    def __magic_name__ ( self , _a , _a ):
        # Maps a list of urls to dummy paths; sharded/pubmed urls collapse to one file.
        lowercase : Union[str, Any] = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        lowercase : Any = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , _a ) ) for url in data_url )
        lowercase : List[Any] = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            lowercase : Tuple = [data_url[0]] * len(_a )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(_a )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            lowercase : Union[str, Any] = os.path.join(_a , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
            dummy_data_list.append(_a )
        return dummy_data_list

    def __magic_name__ ( self , _a , _a ):
        # Maps a single url to its dummy path; falls back to the zip itself for
        # backward compatibility when the expected file does not exist.
        for download_callback in self.download_callbacks:
            download_callback(_a )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        lowercase : Tuple = os.path.join(_a , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
        if os.path.exists(_a ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def __magic_name__ ( self ):
        # delete_extracted_files replacement -- nothing to clean for dummy data.
        pass

    def __magic_name__ ( self ):
        # manage_extracted_files replacement -- no-op.
        pass

    def __magic_name__ ( self , _a ):
        # iter_archive replacement: yields (relative_posix_path, file_obj) pairs
        # from either the local fixture tree or the zip member list.
        def _iter_archive_members(_a ):
            # this preserves the order of the members inside the ZIP archive
            lowercase : Optional[int] = Path(self.dummy_file ).parent
            lowercase : List[str] = path.relative_to(_a )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                lowercase : List[str] = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(_a )

        lowercase : Union[str, Any] = Path(_a )
        lowercase : List[Any] = _iter_archive_members(_a ) if self.use_local_dummy_data else path.rglob("*" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__") ):
                yield file_path.relative_to(_a ).as_posix(), file_path.open("rb" )

    def __magic_name__ ( self , _a ):
        # iter_files replacement: walks paths depth-first, skipping hidden and
        # dunder-prefixed entries, yielding file paths in sorted order.
        if not isinstance(_a , _a ):
            lowercase : Any = [paths]
        for path in paths:
            if os.path.isfile(_a ):
                if os.path.basename(_a ).startswith((".", "__") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(_a ):
                    if os.path.basename(_a ).startswith((".", "__") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(_a ):
                        if filename.startswith((".", "__") ):
                            continue
                        yield os.path.join(_a , _a )
| 361
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase__ = logging.get_logger(__name__)
# NOTE(review): the same obfuscated name is immediately rebound below, so the
# logger above is lost; these were presumably ``logger`` and
# ``DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP``.
lowerCamelCase__ = {
    """shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
    # See all Dinat models at https://huggingface.co/models?filter=dinat
}
class A__ ( _lowerCamelCase , _lowerCamelCase):
    """Configuration for a DiNAT (Dilated Neighborhood Attention) backbone.

    NOTE(review): the base classes were obfuscated to ``_lowerCamelCase``
    (undefined); given the imports at the top of this file they are presumably
    BackboneConfigMixin and PretrainedConfig -- confirm before shipping.
    The original ``__init__`` repeated one parameter name 18 times (a
    SyntaxError); parameter names and defaults were restored from the
    assignment order in the body.
    """

    # NOTE(review): two class attributes share the name ``A_`` (presumably
    # ``model_type`` and ``attribute_map``); only the second binding survives.
    A_ : Union[str, Any] = 'dinat'
    A_ : str = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['stem'] + [f"stage{idx}" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
| 549
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class A__ ( _lowerCamelCase):
    """Output container for the Shap-E pipeline below (rendered frames).

    NOTE(review): the base class was obfuscated to ``_lowerCamelCase``;
    given the imports it is presumably ``BaseOutput``.
    """

    # Rendered frames: PIL image(s) or numpy array(s).
    A_ : Union[PIL.Image.Image, np.ndarray]
class A__ ( _lowerCamelCase):
    """Shap-E image-to-3D pipeline: encode image -> denoise prior latents -> render.

    NOTE(review): structurally broken by obfuscation -- the base class name was
    replaced (presumably ``DiffusionPipeline``), and every method signature
    repeats ``_SCREAMING_SNAKE_CASE`` as the parameter name, which is a
    SyntaxError in Python.  The bodies reference the intended names
    (``latents``, ``image``, ``guidance_scale``, ...), so the original
    parameter lists must be restored before this can run.
    """

    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
        # Presumed params: prior, image_encoder, image_processor, scheduler, renderer.
        super().__init__()
        self.register_modules(
            prior=_SCREAMING_SNAKE_CASE , image_encoder=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , renderer=_SCREAMING_SNAKE_CASE , )

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
        # prepare_latents: draw fresh noise or validate caller-supplied latents,
        # then scale by the scheduler's initial noise sigma.
        if latents is None:
            __lowerCAmelCase : List[str] = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            __lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : Optional[int] = latents * scheduler.init_noise_sigma
        return latents

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
        # enable_sequential_cpu_offload: push image encoder and prior to
        # accelerate's cpu_offload on the given GPU.
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        __lowerCAmelCase : Tuple = torch.device(f"cuda:{gpu_id}" )
        __lowerCAmelCase : Union[str, Any] = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )

    @property
    def __lowerCamelCase ( self ):
        # _execution_device: resolve the device accelerate hooks will run on.
        if self.device != torch.device('meta' ) or not hasattr(self.image_encoder , '_hf_hook' ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
                and hasattr(module._hf_hook , 'execution_device' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
        # _encode_image: CLIP-encode the image(s), repeat per prompt, and (for
        # classifier-free guidance) prepend zeroed unconditional embeddings.
        if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(image[0] , torch.Tensor ):
            __lowerCAmelCase : str = torch.cat(_SCREAMING_SNAKE_CASE , axis=0 ) if image[0].ndim == 4 else torch.stack(_SCREAMING_SNAKE_CASE , axis=0 )
        if not isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
            __lowerCAmelCase : Optional[int] = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values[0].unsqueeze(0 )
        __lowerCAmelCase : Dict = image.to(dtype=self.image_encoder.dtype , device=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = self.image_encoder(_SCREAMING_SNAKE_CASE )['last_hidden_state']
        __lowerCAmelCase : Optional[int] = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
        __lowerCAmelCase : Tuple = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
        if do_classifier_free_guidance:
            __lowerCAmelCase : List[Any] = torch.zeros_like(_SCREAMING_SNAKE_CASE )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            __lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] )
        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(_SCREAMING_SNAKE_CASE )
    def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 25 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 64 , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
        # Presumed params: image, num_images_per_prompt=1, num_inference_steps=25,
        # generator=None, latents=None, guidance_scale=4.0, frame_size=64,
        # output_type="pil", return_dict=True.
        if isinstance(_SCREAMING_SNAKE_CASE , PIL.Image.Image ):
            __lowerCAmelCase : Union[str, Any] = 1
        elif isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
            __lowerCAmelCase : Tuple = image.shape[0]
        elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
            __lowerCAmelCase : Any = len(_SCREAMING_SNAKE_CASE )
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_SCREAMING_SNAKE_CASE )}" )
        __lowerCAmelCase : Optional[Any] = self._execution_device
        __lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
        __lowerCAmelCase : Any = guidance_scale > 1.0
        __lowerCAmelCase : List[Any] = self._encode_image(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        # prior
        self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[str] = self.scheduler.timesteps
        __lowerCAmelCase : Optional[int] = self.prior.config.num_embeddings
        __lowerCAmelCase : List[str] = self.prior.config.embedding_dim
        __lowerCAmelCase : Any = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        __lowerCAmelCase : str = latents.reshape(latents.shape[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
        for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
            # expand the latents if we are doing classifier free guidance
            __lowerCAmelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            __lowerCAmelCase : Optional[int] = self.scheduler.scale_model_input(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
            __lowerCAmelCase : Tuple = self.prior(
                _SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , proj_embedding=_SCREAMING_SNAKE_CASE , ).predicted_image_embedding
            # remove the variance
            __lowerCAmelCase , __lowerCAmelCase : Tuple = noise_pred.split(
                scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            # NOTE(review): ``do_classifier_free_guidance`` is a bool, so
            # ``is not None`` is always True -- this presumably should just
            # test truthiness (``if do_classifier_free_guidance:``).
            if do_classifier_free_guidance is not None:
                __lowerCAmelCase , __lowerCAmelCase : List[Any] = noise_pred.chunk(2 )
                __lowerCAmelCase : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
            __lowerCAmelCase : Dict = self.scheduler.step(
                _SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , sample=_SCREAMING_SNAKE_CASE , ).prev_sample
        if output_type == "latent":
            return ShapEPipelineOutput(images=_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = []
        for i, latent in enumerate(_SCREAMING_SNAKE_CASE ):
            # NOTE(review): stray debug print -- should be removed.
            print()
            __lowerCAmelCase : int = self.renderer.decode(
                latent[None, :] , _SCREAMING_SNAKE_CASE , size=_SCREAMING_SNAKE_CASE , ray_batch_size=40_96 , n_coarse_samples=64 , n_fine_samples=1_28 , )
            images.append(_SCREAMING_SNAKE_CASE )
        __lowerCAmelCase : List[Any] = torch.stack(_SCREAMING_SNAKE_CASE )
        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}" )
        __lowerCAmelCase : int = images.cpu().numpy()
        if output_type == "pil":
            __lowerCAmelCase : Dict = [self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) for image in images]
        # Offload last model to CPU
        if hasattr(self , 'final_offload_hook' ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()
        if not return_dict:
            return (images,)
        return ShapEPipelineOutput(images=_SCREAMING_SNAKE_CASE )
| 549
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class a_ ( unittest.TestCase ):
    '''Model tester that builds small RobertaConfig/inputs for the Flax tests.

    NOTE(review): ``__init__`` repeats the parameter name ``A`` (a SyntaxError
    in Python), and the body references the intended names (``parent``,
    ``batch_size``, ...) which are therefore unbound.  The intended signature
    mirrors the standard Flax BERT-style tester -- confirm against upstream.
    '''

    def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=32 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.02 , A=4 , ) -> List[Any]:
        _SCREAMING_SNAKE_CASE = parent
        _SCREAMING_SNAKE_CASE = batch_size
        _SCREAMING_SNAKE_CASE = seq_length
        _SCREAMING_SNAKE_CASE = is_training
        _SCREAMING_SNAKE_CASE = use_attention_mask
        _SCREAMING_SNAKE_CASE = use_token_type_ids
        _SCREAMING_SNAKE_CASE = use_labels
        _SCREAMING_SNAKE_CASE = vocab_size
        _SCREAMING_SNAKE_CASE = hidden_size
        _SCREAMING_SNAKE_CASE = num_hidden_layers
        _SCREAMING_SNAKE_CASE = num_attention_heads
        _SCREAMING_SNAKE_CASE = intermediate_size
        _SCREAMING_SNAKE_CASE = hidden_act
        _SCREAMING_SNAKE_CASE = hidden_dropout_prob
        _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
        _SCREAMING_SNAKE_CASE = max_position_embeddings
        _SCREAMING_SNAKE_CASE = type_vocab_size
        _SCREAMING_SNAKE_CASE = type_sequence_label_size
        _SCREAMING_SNAKE_CASE = initializer_range
        _SCREAMING_SNAKE_CASE = num_choices

    def snake_case_( self ) -> str:
        # prepare_config_and_inputs: random ids/masks plus a small RobertaConfig.
        _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        _SCREAMING_SNAKE_CASE = None
        if self.use_attention_mask:
            _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
        _SCREAMING_SNAKE_CASE = None
        if self.use_token_type_ids:
            _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        _SCREAMING_SNAKE_CASE = RobertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def snake_case_( self ) -> Union[str, Any]:
        # prepare_config_and_inputs_for_common: tuple -> (config, inputs_dict).
        # NOTE(review): every local below is collapsed into one name, so
        # ``config``/``input_ids``/... are unbound when the dict is built.
        _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
        _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
        return config, inputs_dict

    def snake_case_( self ) -> Optional[int]:
        # prepare_config_and_inputs_for_decoder: adds encoder states/mask.
        # NOTE(review): same local-name collapse as above; also three methods
        # in this class share the name ``snake_case_`` and clobber each other.
        _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
        _SCREAMING_SNAKE_CASE = True
        _SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class a_ ( snake_case_ , unittest.TestCase ):
    '''Flax RoBERTa model test suite.

    NOTE(review): the mixin base was obfuscated to ``snake_case_`` which is not
    defined at module scope (presumably FlaxModelTesterMixin), so the class
    statement itself raises NameError; ``from_pt=A`` below also references an
    undefined ``A`` (presumably ``True``).
    '''

    UpperCamelCase = True
    UpperCamelCase = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def snake_case_( self ) -> Dict:
        # setUp: build the model tester.
        _SCREAMING_SNAKE_CASE = FlaxRobertaModelTester(self )

    @slow
    def snake_case_( self ) -> Union[str, Any]:
        # Smoke-test from_pretrained for every model class.
        # NOTE(review): both methods share the name ``snake_case_``; this one
        # clobbers setUp above.
        for model_class_name in self.all_model_classes:
            _SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""roberta-base""" , from_pt=A )
            _SCREAMING_SNAKE_CASE = model(np.ones((1, 1) ) )
            self.assertIsNotNone(A )
| 314
|
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
# Type alias: an undirected edge as a pair of vertex indices (was ``EdgeT``).
lowercase_ = tuple[int, int]
class a_ :
    '''Weighted undirected graph with Prim's minimum-spanning-tree algorithm.

    NOTE(review): broken by obfuscation -- ``__init__`` and the second method
    repeat their parameter names (a SyntaxError), both non-init methods share
    the name ``snake_case_`` (the add_edge definition is clobbered by the
    Prim's one), and the Prim's body calls ``subgraph.add_edge`` which no
    longer exists under that name.  The intended API was evidently
    ``__init__(vertices, edges)``, ``add_edge(edge, weight)`` and
    ``prims_algorithm()``.
    '''

    def __init__( self , A , A ) -> None:
        # Presumed params: vertices (set[int]), edges (Mapping[EdgeT, int]);
        # edges are normalised so each key is (min(edge), max(edge)).
        _SCREAMING_SNAKE_CASE = vertices
        _SCREAMING_SNAKE_CASE = {
            (min(A ), max(A )): weight for edge, weight in edges.items()
        }

    def snake_case_( self , A , A ) -> None:
        # add_edge(edge, weight): registers both endpoints and the weight.
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        _SCREAMING_SNAKE_CASE = weight

    def snake_case_( self ) -> Graph:
        # prims_algorithm: grow an MST from the smallest vertex by repeatedly
        # adding the lightest edge crossing the cut.
        _SCREAMING_SNAKE_CASE = Graph({min(self.vertices )} , {} )
        _SCREAMING_SNAKE_CASE = 42
        _SCREAMING_SNAKE_CASE = 42
        _SCREAMING_SNAKE_CASE = 42
        _SCREAMING_SNAKE_CASE = 42
        while len(subgraph.vertices ) < len(self.vertices ):
            _SCREAMING_SNAKE_CASE = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        _SCREAMING_SNAKE_CASE = edge
                        _SCREAMING_SNAKE_CASE = weight
            subgraph.add_edge(A , A )
        return subgraph
def lowerCamelCase(__lowerCamelCase: str = "p107_network.txt") -> int:
    """Project Euler 107: maximum saving achievable on the network in the
    given file while keeping all vertices connected.

    Returns total weight of all edges minus the weight of a minimum
    spanning tree.
    """
    script_folder = os.path.abspath(os.path.dirname(__file__))
    loadpath = os.path.join(script_folder, __lowerCamelCase)
    edges: dict[tuple[int, int], int] = {}

    with open(loadpath) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    # Only the lower triangle is needed (matrix is symmetric);
    # "-" marks a missing edge.
    for edgea in range(1, len(adjacency_matrix)):
        for edgeb in range(edgea):
            if adjacency_matrix[edgea][edgeb] != "-":
                edges[(edgeb, edgea)] = int(adjacency_matrix[edgea][edgeb])

    graph = a_(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())
    return initial_total - optimal_total


if __name__ == "__main__":
    print(f"{lowerCamelCase() = }")
| 314
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    """Convert a TensorFlow RemBERT checkpoint into a PyTorch state dict.

    tf_checkpoint_path: path to the TensorFlow checkpoint.
    rembert_config_file: JSON config describing the model architecture.
    pytorch_dump_path: where the PyTorch weights are written.
    """
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 714
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class BlipTextModelTester:
    """Builds a tiny BlipText config plus random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        """Random input_ids plus a left-contiguous attention mask tensor."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            # Make each row contiguous: ones up to a random index, zeros after.
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        """BlipTextConfig built from this tester's hyper-parameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        """Run the model with and without the mask; check output shapes."""
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    """Common-mixin test suite for TFBlipTextModel."""

    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    # NOTE(review): the three flags below were name-mangled in the original;
    # mapped back to the usual mixin switches — confirm against upstream.
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        # BLIP checkpoints legitimately miss some keys between PT and TF.
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
| 506
| 0
|
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = 'T5Config'
class TFMTaModel(TFTaModel):
    """TF mT5 base model: identical architecture to TFT5Model, mT5 config."""

    model_type = "mt5"
    config_class = MTaConfig
class TFMTaForConditionalGeneration(TFTaForConditionalGeneration):
    """TF mT5 with LM head: TFT5ForConditionalGeneration with the mT5 config."""

    model_type = "mt5"
    config_class = MTaConfig
class TFMTaEncoderModel(TFTaEncoderModel):
    """TF mT5 encoder-only model: TFT5EncoderModel with the mT5 config."""

    model_type = "mt5"
    config_class = MTaConfig
| 43
|
"""simple docstring"""
from collections import namedtuple
A = namedtuple("""from_to""", """from_ to""")
A = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.001, 1_000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.00454, 264.172),
"""cubicyard""": from_to(0.76455, 1.30795),
"""cubicfoot""": from_to(0.028, 35.3147),
"""cup""": from_to(0.000236588, 4226.75),
}
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> float:
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
+ ", ".join(UpperCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
+ ", ".join(UpperCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 77
| 0
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
# Relative frequency (%) of each letter in typical English text,
# taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70,
    "T": 9.06,
    "A": 8.17,
    "O": 7.51,
    "I": 6.97,
    "N": 6.75,
    "S": 6.33,
    "H": 6.09,
    "R": 5.99,
    "D": 4.25,
    "L": 4.03,
    "C": 2.78,
    "U": 2.76,
    "M": 2.41,
    "W": 2.36,
    "F": 2.23,
    "G": 2.02,
    "Y": 1.97,
    "P": 1.93,
    "B": 1.29,
    "V": 0.98,
    "K": 0.77,
    "J": 0.15,
    "X": 0.15,
    "Q": 0.10,
    "Z": 0.07,
}
# English letters ordered from most to least frequent.
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def get_letter_count(message: str) -> dict:
    """Count occurrences of each uppercase letter in ``message``."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple):
    """Sort key: first element of a (frequency, letters) pair."""
    return x[0]


def get_frequency_order(message: str) -> str:
    """Return the 26 letters ordered from most to least frequent in ``message``."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)

    freq_to_letter_str: dict = {}
    for freq in freq_to_letter:
        # Ties within one frequency bucket break by reverse ETAOIN order.
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])

    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)

    sorted_freq_to_letter = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter)


def english_freq_match_score(message: str) -> int:
    """Score 0-12: how closely ``message``'s letter frequencies match English.

    One point for each of English's six most common letters appearing among
    the message's six most common, and likewise for the six least common.
    """
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 679
|
def decimal_to_fraction(decimal: "int | float | str") -> "tuple[int, int]":
    """Return ``(numerator, denominator)`` of ``decimal`` in lowest terms.

    Accepts ints, floats, or numeric strings; raises ValueError for input
    that cannot be parsed as a number.
    """
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: reduce numerator/denominator by their gcd.
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)


if __name__ == "__main__":
    print(f"""{decimal_to_fraction(2) = }""")
    print(f"""{decimal_to_fraction(89.0) = }""")
    print(f"""{decimal_to_fraction("67") = }""")
    print(f"""{decimal_to_fraction("45.0") = }""")
    print(f"""{decimal_to_fraction(1.5) = }""")
    print(f"""{decimal_to_fraction("6.25") = }""")
    print(f"""{decimal_to_fraction("78td") = }""")
| 679
| 1
|
from ..utils import DummyObject, requires_backends
class lowerCamelCase_ ( metaclass=lowerCamelCase ):
    """Import-time placeholder used when `transformers`, `torch` and `note_seq`
    are not installed: any attempt to build it raises a clear
    "missing backend" error via `requires_backends` instead of an
    ImportError at module import.

    NOTE(review): both classmethods below share the mangled name ``A``
    (the second shadows the first) — presumably the original
    ``from_config`` / ``from_pretrained`` stand-ins; confirm upstream.
    """

    # Backends this dummy stands in for.
    a__ = ['''transformers''', '''torch''', '''note_seq''']
    def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Raise a helpful error unless the required backends are installed."""
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Raise a helpful error unless the required backends are installed."""
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def A ( cls , *__lowerCAmelCase , **__lowerCAmelCase ):
        """Raise a helpful error unless the required backends are installed."""
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 0
|
"""simple docstring"""
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time(t):
    """Format ``t`` (seconds) as ``h:mm:ss``, or ``mm:ss`` when under an hour."""
    t = int(t)
    h, m, s = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"
def html_progress_bar(value, total, prefix, label, width=3_0_0):
    """HTML snippet rendering a ``value``/``total`` progress bar with text."""
    # docstyle-ignore
    return f"""
    <div>
      {prefix}
      <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress>
      {label}
    </div>
    """
def text_to_html_table(items):
    """Render ``items`` (first row = headers, rest = data rows) as an HTML table.

    Floats are formatted with six decimal places; everything else via str().
    """
    html_code = "<table border=\"1\" class=\"dataframe\">\n"
    html_code += """  <thead>\n    <tr style="text-align: left;">\n"""
    for i in items[0]:
        html_code += f"      <th>{i}</th>\n"
    html_code += "    </tr>\n  </thead>\n  <tbody>\n"
    for line in items[1:]:
        html_code += "    <tr>\n"
        for elt in line:
            elt = f"{elt:.6f}" if isinstance(elt, float) else str(elt)
            html_code += f"      <td>{elt}</td>\n"
        html_code += "    </tr>\n"
    html_code += "  </tbody>\n</table><p>"
    return html_code
class snake_case :
    """HTML progress bar for Jupyter notebooks (rendered via IPython.display).

    NOTE(review): this module appears machine-mangled. The ``__init__`` and
    ``update`` signatures repeat one parameter name (a SyntaxError in Python)
    while their bodies read the original names (``total``, ``value``, ...),
    every method shares the name ``lowercase_`` although call sites use
    ``update_bar`` / ``display`` / ``close``, and most ``self.attr = ...``
    assignments were turned into throwaway ``__lowerCAmelCase`` locals, so
    attributes read elsewhere (``self.total``, ``self.last_value``, ...) are
    never set. Restore parameter names, method names and ``self.`` targets
    before using this class.
    """

    # Presumably the original ``warmup`` (updates always rendered) and
    # ``update_every`` (min seconds between renders); the second assignment
    # shadows the first because both names were mangled identically.
    SCREAMING_SNAKE_CASE_ : Union[str, Any] = 5
    SCREAMING_SNAKE_CASE_ : Optional[Any] = 0.2
    def __init__( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional["NotebookTrainingTracker"] = None , UpperCamelCase__ : int = 3_0_0 , )-> int:
        """Store total/prefix/leave/parent/width; reset timing state."""
        __lowerCAmelCase: Tuple = total
        __lowerCAmelCase: str = "" if prefix is None else prefix
        __lowerCAmelCase: Dict = leave
        __lowerCAmelCase: Optional[Any] = parent
        __lowerCAmelCase: List[Any] = width
        __lowerCAmelCase: str = None
        __lowerCAmelCase: int = None
        __lowerCAmelCase: Any = None
    def lowercase_ ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : bool = False , UpperCamelCase__ : str = None)-> Union[str, Any]:
        """Throttled update of the bar to ``value`` (presumably ``update``)."""
        __lowerCAmelCase: Tuple = value
        if comment is not None:
            __lowerCAmelCase: Any = comment
        if self.last_value is None:
            # First call: record start time/value and render unconditionally.
            __lowerCAmelCase: str = time.time()
            __lowerCAmelCase: int = value
            __lowerCAmelCase: int = None
            __lowerCAmelCase: List[Any] = self.warmup
            __lowerCAmelCase: Union[str, Any] = 1
            self.update_bar(UpperCamelCase__)
        elif value <= self.last_value and not force_update:
            return
        elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total):
            if self.first_calls > 0:
                self.first_calls -= 1
            __lowerCAmelCase: int = time.time()
            __lowerCAmelCase: Optional[Any] = current_time - self.start_time
            # We could have value = self.start_value if the update is called twixe with the same start value.
            if value > self.start_value:
                __lowerCAmelCase: Any = self.elapsed_time / (value - self.start_value)
            else:
                __lowerCAmelCase: Tuple = None
            if value >= self.total:
                __lowerCAmelCase: Dict = self.total
                __lowerCAmelCase: List[str] = None
                if not self.leave:
                    self.close()
            elif self.average_time_per_item is not None:
                __lowerCAmelCase: Optional[int] = self.average_time_per_item * (self.total - value)
            self.update_bar(UpperCamelCase__)
            __lowerCAmelCase: Optional[int] = value
            __lowerCAmelCase: List[Any] = current_time
            if self.average_time_per_item is None:
                __lowerCAmelCase: List[Any] = 1
            else:
                __lowerCAmelCase: Dict = max(int(self.update_every / self.average_time_per_item) , 1)
    def lowercase_ ( self : List[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int=None)-> List[str]:
        """Compose the ``[value/total elapsed < remaining, rate]`` label and redraw."""
        __lowerCAmelCase: int = " " * (len(str(self.total)) - len(str(UpperCamelCase__))) + str(UpperCamelCase__)
        if self.elapsed_time is None:
            __lowerCAmelCase: int = f"[{spaced_value}/{self.total} : < :"
        elif self.predicted_remaining is None:
            __lowerCAmelCase: Optional[int] = f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)}"
        else:
            __lowerCAmelCase: Optional[int] = (
                f"[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <"
                f" {format_time(self.predicted_remaining)}"
            )
            self.label += f", {1/self.average_time_per_item:.2f} it/s"
        self.label += "]" if self.comment is None or len(self.comment) == 0 else f", {self.comment}]"
        self.display()
    def lowercase_ ( self : List[str])-> str:
        """Render (or re-render) the bar's HTML; delegate to parent if nested."""
        __lowerCAmelCase: str = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
        if self.parent is not None:
            # If this is a child bar, the parent will take care of the display.
            self.parent.display()
            return
        if self.output is None:
            __lowerCAmelCase: Optional[Any] = disp.display(disp.HTML(self.html_code) , display_id=UpperCamelCase__)
        else:
            self.output.update(disp.HTML(self.html_code))
    def lowercase_ ( self : Dict)-> int:
        """Blank out the displayed bar (top-level bars only)."""
        if self.parent is None and self.output is not None:
            self.output.update(disp.HTML(""))
class snake_case ( __snake_case ):
    """Progress bar plus an HTML metrics table underneath (training tracker).

    NOTE(review): the base class ``__snake_case`` is undefined at module
    level (presumably the progress-bar class above, whose name was also
    mangled), this class shadows the previous ``snake_case`` definition, all
    methods share the name ``lowercase_`` although call sites use
    ``display`` / ``write_line`` / ``add_child`` / ``remove_child``, and
    several assignments target throwaway ``__lowerCAmelCase`` locals instead
    of ``self`` attributes. Restore those names before relying on it.
    """

    def __init__( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=None)-> List[str]:
        """num_steps plus optional column names for the metrics table."""
        super().__init__(UpperCamelCase__)
        __lowerCAmelCase: List[str] = None if column_names is None else [column_names]
        __lowerCAmelCase: Union[str, Any] = None
    def lowercase_ ( self : List[str])-> List[Any]:
        """Re-render bar, inner metrics table, and any child bar (presumably ``display``)."""
        __lowerCAmelCase: Optional[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
        if self.inner_table is not None:
            self.html_code += text_to_html_table(self.inner_table)
        if self.child_bar is not None:
            self.html_code += self.child_bar.html_code
        if self.output is None:
            __lowerCAmelCase: Optional[Any] = disp.display(disp.HTML(self.html_code) , display_id=UpperCamelCase__)
        else:
            self.output.update(disp.HTML(self.html_code))
    def lowercase_ ( self : List[str] , UpperCamelCase__ : Any)-> int:
        """Append one row of metric values (presumably ``write_line``)."""
        if self.inner_table is None:
            __lowerCAmelCase: Tuple = [list(values.keys()), list(values.values())]
        else:
            __lowerCAmelCase: Union[str, Any] = self.inner_table[0]
            if len(self.inner_table) == 1:
                # We give a chance to update the column names at the first iteration
                for key in values.keys():
                    if key not in columns:
                        columns.append(UpperCamelCase__)
                __lowerCAmelCase: int = columns
            self.inner_table.append([values[c] for c in columns])
    def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=3_0_0)-> Dict:
        """Create a nested child progress bar (presumably ``add_child``)."""
        __lowerCAmelCase: Union[str, Any] = NotebookProgressBar(UpperCamelCase__ , prefix=UpperCamelCase__ , parent=self , width=UpperCamelCase__)
        return self.child_bar
    def lowercase_ ( self : Any)-> Optional[int]:
        """Drop the child bar and redraw (presumably ``remove_child``)."""
        __lowerCAmelCase: Optional[Any] = None
        self.display()
class snake_case ( __snake_case ):
    """Trainer callback driving the notebook progress bar / metrics table.

    NOTE(review): the base class ``__snake_case`` is undefined (presumably
    ``TrainerCallback``, imported above), this class shadows the previous
    ``snake_case`` definitions, every handler shares the name ``lowercase_``
    (originally the ``on_train_begin`` / ``on_step_end`` / ... hooks), and
    many ``self.attr = ...`` assignments were turned into throwaway
    ``__lowerCAmelCase`` locals, so attributes read later
    (``self.training_tracker``, ``self.prediction_bar``, ...) are never set.
    Restore the hook names and ``self.`` targets before using this callback.
    """

    def __init__( self : Tuple)-> Tuple:
        """Initialise tracker/prediction-bar state (all presumably self attrs)."""
        __lowerCAmelCase: str = None
        __lowerCAmelCase: Dict = None
        __lowerCAmelCase: List[str] = False
    def lowercase_ ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[str])-> str:
        """Presumably ``on_train_begin``: build the training tracker/table."""
        __lowerCAmelCase: Union[str, Any] = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
        __lowerCAmelCase: int = 0
        __lowerCAmelCase: int = 0
        __lowerCAmelCase: Optional[int] = [self.first_column] + ["Training Loss"]
        if args.evaluation_strategy != IntervalStrategy.NO:
            column_names.append("Validation Loss")
        __lowerCAmelCase: List[str] = NotebookTrainingTracker(state.max_steps , UpperCamelCase__)
    def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : str)-> int:
        """Presumably ``on_step_end``: advance the bar with an epoch comment."""
        __lowerCAmelCase: Tuple = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
        self.training_tracker.update(
            state.global_step + 1 , comment=f"Epoch {epoch}/{state.num_train_epochs}" , force_update=self._force_next_update , )
        __lowerCAmelCase: Dict = False
    def lowercase_ ( self : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : List[str])-> Tuple:
        """Presumably ``on_prediction_step``: advance (or create) the eval bar."""
        if not has_length(UpperCamelCase__):
            return
        if self.prediction_bar is None:
            if self.training_tracker is not None:
                __lowerCAmelCase: Union[str, Any] = self.training_tracker.add_child(len(UpperCamelCase__))
            else:
                __lowerCAmelCase: Union[str, Any] = NotebookProgressBar(len(UpperCamelCase__))
            self.prediction_bar.update(1)
        else:
            self.prediction_bar.update(self.prediction_bar.value + 1)
    def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[Any])-> int:
        """Presumably ``on_predict``: close and clear the eval bar."""
        if self.prediction_bar is not None:
            self.prediction_bar.close()
        __lowerCAmelCase: int = None
    def lowercase_ ( self : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int=None , **UpperCamelCase__ : Optional[Any])-> List[str]:
        """Presumably ``on_log``: append a training-loss row in step mode."""
        if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
            __lowerCAmelCase: Dict = {"Training Loss": logs["loss"]}
            # First column is necessarily Step sine we're not in epoch eval strategy
            __lowerCAmelCase: Optional[int] = state.global_step
            self.training_tracker.write_line(UpperCamelCase__)
    def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str=None , **UpperCamelCase__ : Tuple)-> Optional[int]:
        """Presumably ``on_evaluate``: write a metrics row and drop the eval bar."""
        if self.training_tracker is not None:
            __lowerCAmelCase: Dict = {"Training Loss": "No log", "Validation Loss": "No log"}
            for log in reversed(state.log_history):
                if "loss" in log:
                    __lowerCAmelCase: List[str] = log["loss"]
                    break
            if self.first_column == "Epoch":
                __lowerCAmelCase: str = int(state.epoch)
            else:
                __lowerCAmelCase: Dict = state.global_step
            __lowerCAmelCase: int = "eval"
            for k in metrics:
                if k.endswith("_loss"):
                    __lowerCAmelCase: List[str] = re.sub(R"\_loss$" , "" , UpperCamelCase__)
            # Strip bookkeeping entries before displaying the metrics table.
            __lowerCAmelCase: Tuple = metrics.pop("total_flos" , UpperCamelCase__)
            __lowerCAmelCase: Any = metrics.pop("epoch" , UpperCamelCase__)
            __lowerCAmelCase: Union[str, Any] = metrics.pop(f"{metric_key_prefix}_runtime" , UpperCamelCase__)
            __lowerCAmelCase: List[str] = metrics.pop(f"{metric_key_prefix}_samples_per_second" , UpperCamelCase__)
            __lowerCAmelCase: int = metrics.pop(f"{metric_key_prefix}_steps_per_second" , UpperCamelCase__)
            __lowerCAmelCase: Union[str, Any] = metrics.pop(f"{metric_key_prefix}_jit_compilation_time" , UpperCamelCase__)
            for k, v in metrics.items():
                if k == f"{metric_key_prefix}_loss":
                    __lowerCAmelCase: Optional[Any] = v
                else:
                    __lowerCAmelCase: Optional[int] = k.split("_")
                    __lowerCAmelCase: Dict = " ".join([part.capitalize() for part in splits[1:]])
                    __lowerCAmelCase: Tuple = v
            self.training_tracker.write_line(UpperCamelCase__)
            self.training_tracker.remove_child()
            __lowerCAmelCase: Optional[int] = None
            # Evaluation takes a long time so we should force the next update.
            __lowerCAmelCase: Optional[int] = True
    def lowercase_ ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , **UpperCamelCase__ : List[str])-> Optional[Any]:
        """Presumably ``on_train_end``: final forced update, then drop the tracker."""
        self.training_tracker.update(
            state.global_step , comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}" , force_update=UpperCamelCase__)
        __lowerCAmelCase: Dict = None
| 346
| 0
|
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
# Mark all tests in this module as integration tests.
pytestmark = pytest.mark.integration

# Metrics that need an optional dependency, and probes for those deps.
# (The skip decorators below read these names.)
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    """Decorator: skip the test when the metric needs fairseq and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_if_metric_requires_transformers(test_case):
    """Decorator: skip the test when the metric needs transformers and it is absent."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper
def skip_on_windows(test_case):
    """Decorator: skip the test for metrics that do not work on Windows."""

    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    """Parameterized.TestCase entries for every metric in ./metrics/."""
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
    __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@local
class A_ ( parameterized.TestCase ):
    """Runs every local metric's doctest with its expensive calls patched out.

    NOTE(review): this module appears machine-mangled — the
    ``for_all_test_methods`` arguments are the undefined name
    ``__lowerCamelCase`` (presumably the three skip decorators defined
    above), decorator call sites below reference ``LocalMetricTest`` (this
    class's likely original name), every method here shares the name
    ``SCREAMING_SNAKE_CASE__`` (so earlier ones are shadowed), and most
    results are bound to a throwaway local ``lowercase`` while the following
    lines read the original names (``metric_module``, ``metric``,
    ``results``, ...). Restore those identifiers before running.
    """
    # Registry of per-metric patch contextmanagers, and the parameterized
    # metric name placeholder (both names mangled).
    _UpperCamelCase : Dict = {}
    _UpperCamelCase : Union[str, Any] = None
    @pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
    @pytest.mark.filterwarnings('ignore:load_metric is deprecated:FutureWarning' )
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Presumably ``test_load_metric``: import the metric module, check its
        # _compute signature, then run its doctests with patched internals.
        lowercase = '[...]'
        lowercase = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics' , snake_case ) ).module_path )
        lowercase = datasets.load.import_main_class(metric_module.__name__ , dataset=snake_case )
        # check parameters
        lowercase = inspect.signature(metric._compute ).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) )  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(snake_case , metric_module.__name__ ):
            with self.use_local_metrics():
                try:
                    lowercase = doctest.testmod(snake_case , verbose=snake_case , raise_on_error=snake_case )
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Presumably the real (unpatched) doctest run for slow CI.
        lowercase = '[...]'
        lowercase = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join('metrics' , snake_case ) ).module_path )
        # run doctest
        with self.use_local_metrics():
            lowercase = doctest.testmod(snake_case , verbose=snake_case , raise_on_error=snake_case )
        self.assertEqual(results.failed , 0 )
        self.assertGreater(results.attempted , 1 )
    @contextmanager
    def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case ):
        # Presumably ``patch_intensive_calls``: apply a registered patcher, if any.
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](snake_case ):
                yield
        else:
            yield
    @contextmanager
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Presumably ``use_local_metrics``: make load_metric resolve to ./metrics.
        def load_local_metric(snake_case , *snake_case , **snake_case ):
            return load_metric(os.path.join('metrics' , snake_case ) , *snake_case , **snake_case )
        with patch('datasets.load_metric' ) as mock_load_metric:
            lowercase = load_local_metric
            yield
    @classmethod
    def SCREAMING_SNAKE_CASE__ ( cls , snake_case ):
        # Presumably ``register_intensive_calls_patcher``: decorator registering
        # a per-metric contextmanager in INTENSIVE_CALLS_PATCHER.
        def wrapper(snake_case ):
            lowercase = contextmanager(snake_case )
            lowercase = patcher
            return patcher
        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    """Patch bleurt so its doctest never downloads or runs a real model."""
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    """Patch bert_score's model download and forward pass for the doctest."""
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    """Patch comet's checkpoint download and prediction for the doctest."""

    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to do download a bert model
    # mock load_from_checkpoint which is supposed to do download a bert model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    """seqeval must reject an unknown tagging scheme with a clear ValueError."""
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 565
|
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __lowerCamelCase ):
    """Unit tests for ``UnCLIPScheduler``: config sweeps, variance schedule
    values, and full denoising loops (default and 25-step timesteps).

    NOTE(review): the base name ``__lowerCamelCase`` is undefined in this
    file — upstream this extends ``SchedulerCommonTest`` (imported above),
    which supplies ``check_over_configs``/``check_over_forward``/
    ``dummy_model``/``dummy_sample_deter``. Several helper calls below pass
    the undefined name ``snake_case`` where the loop variable was intended,
    and some locals were rebound to ``lowercase`` — mangling artifacts.
    """
    # Scheduler class(es) exercised by the shared test machinery.
    _UpperCamelCase : Optional[int] = (UnCLIPScheduler,)
    def SCREAMING_SNAKE_CASE__ ( self , **snake_case ):
        # Default config; keyword overrides are merged in before returning.
        # NOTE(review): the dict is bound to ``lowercase`` but read as ``config``.
        lowercase = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**snake_case )
        return config
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Sweep the training-timestep count.
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Sweep both supported variance schedules.
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Sweep the clip_sample flag.
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Sweep the clipping range.
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Sweep the prediction target.
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Forward-step sweep; prev_timestep must precede time_step.
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=snake_case , prev_timestep=snake_case )
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Regression values for the fixed_small_log variance schedule.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(variance_type='fixed_small_log' )
        lowercase = scheduler_class(**snake_case )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1E-5
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Regression values for the learned_range variance schedule.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config(variance_type='learned_range' )
        lowercase = scheduler_class(**snake_case )
        lowercase = 0.5
        assert scheduler._get_variance(1 , predicted_variance=snake_case ) - -10.1_712_790 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=snake_case ) - -5.7_998_052 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=snake_case ) - -0.0_010_011 < 1E-5
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Full denoising loop over the default timestep schedule; checks
        # deterministic sum/mean of the final sample.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**snake_case )
        lowercase = scheduler.timesteps
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        lowercase = torch.manual_seed(0 )
        for i, t in enumerate(snake_case ):
            # 1. predict noise residual
            lowercase = model(snake_case , snake_case )
            # 2. predict previous mean of sample x_t-1
            lowercase = scheduler.step(snake_case , snake_case , snake_case , generator=snake_case ).prev_sample
            lowercase = pred_prev_sample
        lowercase = torch.sum(torch.abs(snake_case ) )
        lowercase = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 252.2_682_495 ) < 1E-2
        assert abs(result_mean.item() - 0.3_284_743 ) < 1E-3
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Same loop with an explicit 25-step schedule and explicit prev_timestep.
        lowercase = self.scheduler_classes[0]
        lowercase = self.get_scheduler_config()
        lowercase = scheduler_class(**snake_case )
        scheduler.set_timesteps(25 )
        lowercase = scheduler.timesteps
        lowercase = self.dummy_model()
        lowercase = self.dummy_sample_deter
        lowercase = torch.manual_seed(0 )
        for i, t in enumerate(snake_case ):
            # 1. predict noise residual
            lowercase = model(snake_case , snake_case )
            if i + 1 == timesteps.shape[0]:
                lowercase = None
            else:
                lowercase = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            lowercase = scheduler.step(
                snake_case , snake_case , snake_case , prev_timestep=snake_case , generator=snake_case ).prev_sample
            lowercase = pred_prev_sample
        lowercase = torch.sum(torch.abs(snake_case ) )
        lowercase = torch.mean(torch.abs(snake_case ) )
        assert abs(result_sum.item() - 258.2_044_983 ) < 1E-2
        assert abs(result_mean.item() - 0.3_362_038 ) < 1E-3
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Trained-betas variant not applicable to UnCLIP: intentionally skipped.
        pass
    def SCREAMING_SNAKE_CASE__ ( self ):
        # Full-loop-with-no-set_timesteps variant intentionally skipped.
        pass
| 565
| 1
|
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
# Root logger shared by the test case below (handlers are attached in setUp).
lowerCamelCase__ : List[Any] = logging.getLogger()
def UpperCamelCase ( ) -> int:
    """Return the value passed to the ``-f`` command-line flag.

    This absorbs the ``-f`` argument some runners pass (e.g. notebooks /
    pytest wrappers) so the example scripts below can parse their own argv.

    Fixed: the original bound ``parser.parse_args()`` to a throwaway name and
    then returned ``args.f`` with ``args`` undefined (NameError).
    """
    lowercase__ : str = argparse.ArgumentParser()
    lowercase__.add_argument("""-f""" )
    args = lowercase__.parse_args()
    return args.f
class _snake_case ( UpperCAmelCase_ ):
    """End-to-end (slow) tests for the DeeBERT research-project scripts.

    NOTE(review): the base name ``UpperCAmelCase_`` is undefined in this
    module — upstream this derives from ``TestCasePlus``. Several locals
    below (``n_gpu``, ``args``, ``result``, the stdout handler) are read
    under their original names but were rebound to mangled names — restore
    the bindings before running.
    """
    def lowercase__ ( self):
        '''Route example-script logging to stdout so the test runner captures it.'''
        lowercase__ : Optional[Any] = logging.StreamHandler(sys.stdout)
        logger.addHandler(SCREAMING_SNAKE_CASE_)
    def lowercase__ ( self , SCREAMING_SNAKE_CASE_):
        '''Run ``run_glue_deebert`` with the given argv and assert every
        reported metric is at least 0.666 (single-GPU path only).'''
        lowercase__ : Dict = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0 , """run_glue_deebert.py""")
            with patch.object(SCREAMING_SNAKE_CASE_ , """argv""" , SCREAMING_SNAKE_CASE_):
                lowercase__ : Any = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(SCREAMING_SNAKE_CASE_ , 0.6_6_6)
    @slow
    @require_torch_non_multi_gpu
    def lowercase__ ( self):
        '''Train a two-stage DeeBERT model on the MRPC fixture, then evaluate
        it per-highway and with an early-exit entropy threshold.'''
        # Stage 1+2 training run.
        lowercase__ : Dict = """
            --model_type roberta
            --model_name_or_path roberta-base
            --task_name MRPC
            --do_train
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --max_seq_length 128
            --per_gpu_eval_batch_size=1
            --per_gpu_train_batch_size=8
            --learning_rate 2e-4
            --num_train_epochs 3
            --overwrite_output_dir
            --seed 42
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --save_steps 0
            --overwrite_cache
            --eval_after_first_stage
            """.split()
        self.run_and_check(SCREAMING_SNAKE_CASE_)
        # Evaluate each internal highway exit of the trained model.
        lowercase__ : List[str] = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --eval_each_highway
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(SCREAMING_SNAKE_CASE_)
        # Evaluate with early-exit entropy threshold 0.1.
        lowercase__ : Optional[int] = """
            --model_type roberta
            --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --task_name MRPC
            --do_eval
            --do_lower_case
            --data_dir ./tests/fixtures/tests_samples/MRPC/
            --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
            --plot_data_dir ./examples/deebert/results/
            --max_seq_length 128
            --early_exit_entropy 0.1
            --eval_highway
            --overwrite_cache
            --per_gpu_eval_batch_size=1
            """.split()
        self.run_and_check(SCREAMING_SNAKE_CASE_)
| 12
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def lowerCAmelCase_ ( __A ) -> List[Any]:
    '''Build a ``SwinvaConfig`` whose architecture and label maps are parsed
    from a timm SwinV2 checkpoint name (e.g. ``swinv2_tiny_patch4_window8_256``).

    NOTE(review): local bindings in this function were mangled — every value
    is assigned to ``UpperCAmelCase__`` while subsequent reads use the
    original names (``swinva_name``, ``name_split``, ``model_size``, ...);
    the parameter ``__A`` is the checkpoint name. Restore the bindings
    before executing.
    '''
    UpperCAmelCase__ = SwinvaConfig()
    # e.g. "swinv2_tiny_patch4_window8_256" -> ["swinv2","tiny","patch4","window8","256"]
    UpperCAmelCase__ = swinva_name.split("_" )
    UpperCAmelCase__ = name_split[1]
    # Image size: plain ("...256") or fine-tuned-to ("...to_384") variants.
    if "to" in name_split[3]:
        UpperCAmelCase__ = int(name_split[3][-3:] )
    else:
        UpperCAmelCase__ = int(name_split[3] )
    # Window size, likewise in plain or "to" form.
    if "to" in name_split[2]:
        UpperCAmelCase__ = int(name_split[2][-2:] )
    else:
        UpperCAmelCase__ = int(name_split[2][6:] )
    # Architecture dimensions per model size.
    if model_size == "tiny":
        UpperCAmelCase__ = 96
        UpperCAmelCase__ = (2, 2, 6, 2)
        UpperCAmelCase__ = (3, 6, 12, 24)
    elif model_size == "small":
        UpperCAmelCase__ = 96
        UpperCAmelCase__ = (2, 2, 18, 2)
        UpperCAmelCase__ = (3, 6, 12, 24)
    elif model_size == "base":
        UpperCAmelCase__ = 128
        UpperCAmelCase__ = (2, 2, 18, 2)
        UpperCAmelCase__ = (4, 8, 16, 32)
    else:
        UpperCAmelCase__ = 192
        UpperCAmelCase__ = (2, 2, 18, 2)
        UpperCAmelCase__ = (6, 12, 24, 48)
    # Fine-tuned ("to") checkpoints use a different pretrained window size.
    if "to" in swinva_name:
        UpperCAmelCase__ = (12, 12, 12, 6)
    # Label maps: ImageNet-22k for pure 22k checkpoints, else ImageNet-1k.
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        UpperCAmelCase__ = 21_841
        UpperCAmelCase__ = "huggingface/label-files"
        UpperCAmelCase__ = "imagenet-22k-id2label.json"
        UpperCAmelCase__ = json.load(open(hf_hub_download(__A, __A, repo_type="dataset" ), "r" ) )
        UpperCAmelCase__ = {int(__A ): v for k, v in idalabel.items()}
        UpperCAmelCase__ = idalabel
        UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
    else:
        UpperCAmelCase__ = 1_000
        UpperCAmelCase__ = "huggingface/label-files"
        UpperCAmelCase__ = "imagenet-1k-id2label.json"
        UpperCAmelCase__ = json.load(open(hf_hub_download(__A, __A, repo_type="dataset" ), "r" ) )
        UpperCAmelCase__ = {int(__A ): v for k, v in idalabel.items()}
        UpperCAmelCase__ = idalabel
        UpperCAmelCase__ = {v: k for k, v in idalabel.items()}
    # Populate the config (upstream these are config.image_size, config.num_labels, ...).
    UpperCAmelCase__ = img_size
    UpperCAmelCase__ = num_classes
    UpperCAmelCase__ = embed_dim
    UpperCAmelCase__ = depths
    UpperCAmelCase__ = num_heads
    UpperCAmelCase__ = window_size
    return config
def lowerCAmelCase_ ( __A ) -> Any:
'''simple docstring'''
if "patch_embed.proj" in name:
UpperCAmelCase__ = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
UpperCAmelCase__ = name.replace("patch_embed.norm", "embeddings.norm" )
if "layers" in name:
UpperCAmelCase__ = "encoder." + name
if "attn.proj" in name:
UpperCAmelCase__ = name.replace("attn.proj", "attention.output.dense" )
if "attn" in name:
UpperCAmelCase__ = name.replace("attn", "attention.self" )
if "norm1" in name:
UpperCAmelCase__ = name.replace("norm1", "layernorm_before" )
if "norm2" in name:
UpperCAmelCase__ = name.replace("norm2", "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase__ = name.replace("mlp.fc1", "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase__ = name.replace("mlp.fc2", "output.dense" )
if "q_bias" in name:
UpperCAmelCase__ = name.replace("q_bias", "query.bias" )
if "k_bias" in name:
UpperCAmelCase__ = name.replace("k_bias", "key.bias" )
if "v_bias" in name:
UpperCAmelCase__ = name.replace("v_bias", "value.bias" )
if "cpb_mlp" in name:
UpperCAmelCase__ = name.replace("cpb_mlp", "continuous_position_bias_mlp" )
if name == "norm.weight":
UpperCAmelCase__ = "layernorm.weight"
if name == "norm.bias":
UpperCAmelCase__ = "layernorm.bias"
if "head" in name:
UpperCAmelCase__ = name.replace("head", "classifier" )
else:
UpperCAmelCase__ = "swinv2." + name
return name
def lowerCAmelCase_ ( __A, __A ) -> List[Any]:
    '''Rewrite a timm SwinV2 state dict into the HF layout, splitting each
    fused ``qkv`` tensor into separate query/key/value entries.

    NOTE(review): this block cannot run as written — the signature declares
    the same name for both parameters (a SyntaxError; upstream:
    ``(orig_state_dict, model)``), every assignment target was mangled to
    ``UpperCAmelCase__`` while reads use the original names (``val``,
    ``key_split``, ``layer_num``, ``block_num``, ``dim``, ``model``), and
    the re-insertion of each renamed key into the dict was lost entirely.
    Restore from the upstream conversion script before use.
    '''
    for key in orig_state_dict.copy().keys():
        UpperCAmelCase__ = orig_state_dict.pop(__A )
        # Relative-position masks are recomputed by the HF model; skip them.
        if "mask" in key:
            continue
        elif "qkv" in key:
            # Key shape: "layers.<L>.blocks.<B>.attn.qkv.{weight,bias}".
            UpperCAmelCase__ = key.split("." )
            UpperCAmelCase__ = int(key_split[1] )
            UpperCAmelCase__ = int(key_split[3] )
            UpperCAmelCase__ = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # First third -> query, middle third -> key, last third -> value.
                UpperCAmelCase__ = val[:dim, :]
                UpperCAmelCase__ = val[dim : dim * 2, :]
                UpperCAmelCase__ = val[-dim:, :]
            else:
                UpperCAmelCase__ = val[:dim]
                UpperCAmelCase__ = val[
                    dim : dim * 2
                ]
                UpperCAmelCase__ = val[-dim:]
        else:
            # Non-qkv tensors keep their value under the renamed key (lost here).
            UpperCAmelCase__ = val
    return orig_state_dict
def lowerCAmelCase_ ( __A, __A ) -> List[Any]:
    '''Convert a timm SwinV2 checkpoint to HF format, verify logits match on a
    sample COCO image, save locally, and push to the Hub.

    NOTE(review): the signature declares the same name for both parameters
    (a SyntaxError; upstream: ``(swinva_name, pytorch_dump_folder_path)``),
    and locals were mangled — reads below reference the lost original names
    (``timm_model``, ``config``, ``model``, ``new_state_dict``, ``url``,
    ``image_processor``, ``image``, ``inputs``, ``timm_outs``, ``hf_outs``).
    Requires network access (timm weights, HF image processor, COCO image).
    '''
    UpperCAmelCase__ = timm.create_model(__A, pretrained=__A )
    timm_model.eval()
    UpperCAmelCase__ = get_swinva_config(__A )
    UpperCAmelCase__ = SwinvaForImageClassification(__A )
    model.eval()
    UpperCAmelCase__ = convert_state_dict(timm_model.state_dict(), __A )
    model.load_state_dict(__A )
    UpperCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
    UpperCAmelCase__ = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_", "-" ) ) )
    UpperCAmelCase__ = Image.open(requests.get(__A, stream=__A ).raw )
    UpperCAmelCase__ = image_processor(images=__A, return_tensors="pt" )
    UpperCAmelCase__ = timm_model(inputs["pixel_values"] )
    UpperCAmelCase__ = model(**__A ).logits
    # Conversion sanity check: both implementations must agree.
    assert torch.allclose(__A, __A, atol=1e-3 )
    print(f"""Saving model {swinva_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(__A )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(__A )
    model.push_to_hub(
        repo_path_or_name=Path(__A, __A ), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swinv2_name',
default='swinv2_tiny_patch4_window8_256',
type=str,
help='Name of the Swinv2 timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
UpperCamelCase__ = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 486
| 0
|
# flake8: noqa
# Lint as: python3
# Public names re-exported by this utils package (mirrors ``__all__``).
_UpperCAmelCase = [
    """VerificationMode""",
    """Version""",
    """disable_progress_bar""",
    """enable_progress_bar""",
    """is_progress_bar_enabled""",
    """experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 70
|
def UpperCamelCase ( __lowercase : int ):
    """Return the first *__lowercase* hexagonal numbers, h(n) = n * (2n - 1).

    :param __lowercase: how many numbers to generate (positive integer)
    :raises ValueError: if the argument is not a positive integer
    """
    # Fixed: validate the type FIRST — the original evaluated `length <= 0`
    # before the type check (TypeError for non-numeric input) and called
    # isinstance with the argument itself as the "type" operand
    # (isinstance(length, length)), which raises TypeError for every value
    # that passes the first comparison.
    if not isinstance(__lowercase, int ) or __lowercase <= 0:
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(__lowercase )]
if __name__ == "__main__":
    # Fixed: the demo referenced the nonexistent name `hexagonal_numbers`.
    print(UpperCamelCase(5))
    print(UpperCamelCase(10))
| 70
| 1
|
'''simple docstring'''
import math
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> str:
    """Convert a non-negative decimal integer to its octal string ("0o..." form)."""
    value = UpperCAmelCase__
    digits = ""
    # Peel off octal digits least-significant first, prepending as we go.
    while value > 0:
        digits = str(value % 8 ) + digits
        value = value // 8  # same as math.floor(value / 8) for non-negative ints
    # Inputs <= 0 produce no digits and render as "0o0", matching the original.
    return f"0o{digits or 0}"
def UpperCAmelCase__ ( ) -> None:
    """Demo: print the octal representation of a few sample values.

    NOTE(review): this redefines ``UpperCAmelCase__`` (shadowing the
    converter directly above) and calls ``decimal_to_octal``/``main``, which
    do not exist under those names in this file — the original function
    names were mangled. Restore distinct names before running.
    """
    print("""\n2 in octal is:""" )
    print(decimal_to_octal(2 ) )  # = 2
    print("""\n8 in octal is:""" )
    print(decimal_to_octal(8 ) )  # = 10
    print("""\n65 in octal is:""" )
    print(decimal_to_octal(65 ) )  # = 101
    print("""\n216 in octal is:""" )
    print(decimal_to_octal(2_16 ) )  # = 330
    print("""\n512 in octal is:""" )
    print(decimal_to_octal(5_12 ) )  # = 1000
    print("""\n""" )
if __name__ == "__main__":
    main()
| 288
|
'''simple docstring'''
import numpy as np
def UpperCAmelCase__ ( input_matrix, vector, error_tol = 1e-12, max_iterations = 1_00, ) -> tuple[float, np.ndarray]:
    """Power iteration: largest-magnitude eigenvalue and its eigenvector.

    Fixed: the original declared all four parameters under one shared name
    (a SyntaxError) and referenced the never-assigned local ``w`` — the
    mangled transformation collapsed every local binding onto one name.
    Parameter and local names are restored from the reads in the body.

    :param input_matrix: square (n, n) real-symmetric or complex-Hermitian array
    :param vector: length-n starting vector (not orthogonal to the top eigenvector)
    :param error_tol: relative change in the Rayleigh quotient at which to stop
    :param max_iterations: hard cap on iterations
    :returns: (eigenvalue, unit eigenvector)
    """
    # Ensure matrix is square.
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # A Hermitian matrix has real eigenvalues; drop the zero imaginary part.
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def UpperCAmelCase__ ( ) -> None:
    '''Cross-check power iteration against numpy's ``eigh`` on a real
    symmetric matrix and a complex Hermitian matrix.

    NOTE(review): calls ``power_iteration`` (mangled to ``UpperCAmelCase__``
    above, which this redefinition shadows) and uses ``np.complexaaa`` —
    presumably ``np.complex128`` before mangling; restore both names before
    executing.
    '''
    A_ = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    A_ = np.array([41, 4, 20] )
    A_ = real_input_matrix.astype(np.complexaaa )
    # Make the complex matrix Hermitian: add i*U and subtract its transpose.
    A_ = np.triu(1j * complex_input_matrix, 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    A_ = np.array([41, 4, 20] ).astype(np.complexaaa )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            A_ = real_input_matrix
            A_ = real_vector
        elif problem_type == "complex":
            A_ = complex_input_matrix
            A_ = complex_vector
        # Our implementation.
        A_ , A_ = power_iteration(UpperCAmelCase__, UpperCAmelCase__ )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        A_ , A_ = np.linalg.eigh(UpperCAmelCase__ )
        # Last eigenvalue is the maximum one.
        A_ = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        A_ = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(UpperCAmelCase__ ) - np.abs(UpperCAmelCase__ ) ) <= 1e-6
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    test_power_iteration()
| 288
| 1
|
def lowerCAmelCase_ ( __a ) -> str:
    """Return *__a* with repeated letters dropped; spaces are always kept.

    Non-alphabetic, non-space characters are discarded entirely.
    """
    deduped = ""
    for ch in __a:
        keep_space = ch == " "
        keep_new_letter = ch not in deduped and ch.isalpha()
        if keep_space or keep_new_letter:
            deduped += ch
    return deduped
def lowerCAmelCase_ ( __a ) -> dict[str, str]:
    """Build a keyword-cipher map from plaintext letters A-Z to cipher letters.

    The key's unique letters head the cipher alphabet; the remaining plain
    letters map to the rest of the alphabet shifted by an offset that skips
    letters already used by the key.

    Fixed: the original's local bindings were mangled away — ``offset``,
    ``char`` and the deduplicated key were read but never assigned under
    those names (NameError), and it called ``remove_duplicates`` which does
    not exist under that name in this file. The deduplication step is
    inlined here so the function is self-contained.

    :param __a: the cipher keyword (any case; non-letters ignored)
    :returns: dict mapping each uppercase plaintext letter to its cipher letter
    """
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key (keep only the first occurrence
    # of each letter; spaces pass through, other characters are dropped).
    key = ""
    for ch in __a.upper():
        if ch == " " or (ch.isalpha() and ch not in key):
            key += ch
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def lowerCAmelCase_ ( message , cipher_map ) -> str:
    """Encode *message* with *cipher_map*, passing unmapped characters through.

    Fixed: the original declared both parameters under the same name ``__a``
    (a SyntaxError) and read the lost originals ``message``/``cipher_map``
    in the body; names restored from those reads.

    :param message: text to encode (upper-cased before lookup)
    :param cipher_map: plaintext-letter -> cipher-letter mapping
    """
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )
def lowerCAmelCase_ ( message , cipher_map ) -> str:
    """Decode *message* by inverting *cipher_map*; unmapped characters pass through.

    Fixed: the original declared both parameters under the same name ``__a``
    (a SyntaxError); names restored from the reads in the body.

    :param message: text to decode (upper-cased before lookup)
    :param cipher_map: the same plaintext -> cipher mapping used to encode
    """
    # Invert the map once, then look each character up in the reverse table.
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def lowerCAmelCase_ ( ) -> None:
    """Interactive driver: prompt for message/keyword and encipher or decipher.

    NOTE(review): references ``encipher``/``decipher``/``create_cipher_map``,
    which exist in this file only under the mangled name ``lowerCAmelCase_``
    (which this definition itself shadows), and reads ``func``/``option``/
    ``key``/``message`` whose bindings were mangled. Restore distinct
    function names before running.
    """
    lowerCamelCase__: Optional[Any] =input("Enter message to encode or decode: " ).strip()
    lowerCamelCase__: Dict =input("Enter keyword: " ).strip()
    lowerCamelCase__: Any =input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        lowerCamelCase__: Union[str, Any] ={"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    lowerCamelCase__: List[Any] =create_cipher_map(__a )
    print(func(__a , __a ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 437
|
def lowerCAmelCase_ ( density , bulk_modulus ) -> float:
    """Return the speed of sound in a fluid: c = sqrt(K / rho).

    Fixed: the original declared both parameters under the same name ``__a``
    (a SyntaxError) while the body read ``density`` and ``bulk_modulus``;
    the parameter names are restored from those reads.

    :param density: fluid density rho in kg/m^3 (must be > 0)
    :param bulk_modulus: adiabatic bulk modulus K in Pa (must be > 0)
    :raises ValueError: on non-positive density or bulk modulus
    """
    if density <= 0:
        raise ValueError("Impossible fluid density" )
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus" )
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 437
| 1
|
'''simple docstring'''
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level bindings below were mangled to the single
# name ``a`` — the pretrained-config map clobbers the logger; upstream these
# are ``logger`` and ``AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP``.
a = logging.get_logger(__name__)
# Canonical checkpoint name -> hosted config URL.
a = {
    "huggingface/autoformer-tourism-monthly": "https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json",
}
class __a ( _snake_case ):
    """Configuration class for the Autoformer time-series model.

    Stores sizes of the series/encoder/decoder, the distribution head, and
    Autoformer-specific knobs (label length, moving-average window,
    autocorrelation factor).

    NOTE(review): the base name ``_snake_case`` is undefined in this file —
    upstream this extends ``PretrainedConfig`` (imported above).
    """
    __UpperCamelCase : Optional[int] = 'autoformer'
    # Standard HF attribute aliases so generic code can query model sizes.
    __UpperCamelCase : int = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
    def __init__( self : Optional[int] ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : str = "student_t" ,lowerCamelCase : str = "nll" ,lowerCamelCase : int = 1 ,lowerCamelCase : List[int] = [1, 2, 3, 4, 5, 6, 7] ,lowerCamelCase : bool = True ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : int = 0 ,lowerCamelCase : Optional[List[int]] = None ,lowerCamelCase : Optional[List[int]] = None ,lowerCamelCase : int = 64 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 32 ,lowerCamelCase : int = 32 ,lowerCamelCase : str = "gelu" ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : int = 100 ,lowerCamelCase : float = 0.02 ,lowerCamelCase : bool = True ,lowerCamelCase : str=True ,lowerCamelCase : int = 10 ,lowerCamelCase : int = 25 ,lowerCamelCase : int = 3 ,**lowerCamelCase : Dict ,):
        '''Populate the config from the time-series, architecture, and
        Autoformer-specific arguments.

        NOTE(review): every positional parameter shares the mangled name
        ``lowerCamelCase``; the assignment right-hand sides below reveal the
        intended names (``prediction_length``, ``context_length``, ...).
        '''
        # Time series specific configuration.
        __SCREAMING_SNAKE_CASE = prediction_length
        # Context defaults to the prediction horizon when not given.
        __SCREAMING_SNAKE_CASE = context_length if context_length is not None else prediction_length
        __SCREAMING_SNAKE_CASE = distribution_output
        __SCREAMING_SNAKE_CASE = loss
        __SCREAMING_SNAKE_CASE = input_size
        __SCREAMING_SNAKE_CASE = num_time_features
        __SCREAMING_SNAKE_CASE = lags_sequence
        __SCREAMING_SNAKE_CASE = scaling
        __SCREAMING_SNAKE_CASE = num_dynamic_real_features
        __SCREAMING_SNAKE_CASE = num_static_real_features
        __SCREAMING_SNAKE_CASE = num_static_categorical_features
        # Cardinality / embedding dims must match the number of static
        # categorical features, else default to trivial values.
        if cardinality is not None and num_static_categorical_features > 0:
            if len(lowerCamelCase ) != num_static_categorical_features:
                raise ValueError(
                    """The cardinality should be a list of the same length as `num_static_categorical_features`""" )
            __SCREAMING_SNAKE_CASE = cardinality
        else:
            __SCREAMING_SNAKE_CASE = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(lowerCamelCase ) != num_static_categorical_features:
                raise ValueError(
                    """The embedding dimension should be a list of the same length as `num_static_categorical_features`""" )
            __SCREAMING_SNAKE_CASE = embedding_dimension
        else:
            # Heuristic embedding size: min(50, (cardinality + 1) // 2).
            __SCREAMING_SNAKE_CASE = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality]
        __SCREAMING_SNAKE_CASE = num_parallel_samples
        # Transformer architecture configuration
        # Encoder input size: lagged values of each series plus extra features.
        __SCREAMING_SNAKE_CASE = input_size * len(self.lags_sequence ) + self._number_of_features
        __SCREAMING_SNAKE_CASE = d_model
        __SCREAMING_SNAKE_CASE = encoder_attention_heads
        __SCREAMING_SNAKE_CASE = decoder_attention_heads
        __SCREAMING_SNAKE_CASE = encoder_ffn_dim
        __SCREAMING_SNAKE_CASE = decoder_ffn_dim
        __SCREAMING_SNAKE_CASE = encoder_layers
        __SCREAMING_SNAKE_CASE = decoder_layers
        __SCREAMING_SNAKE_CASE = dropout
        __SCREAMING_SNAKE_CASE = attention_dropout
        __SCREAMING_SNAKE_CASE = activation_dropout
        __SCREAMING_SNAKE_CASE = encoder_layerdrop
        __SCREAMING_SNAKE_CASE = decoder_layerdrop
        __SCREAMING_SNAKE_CASE = activation_function
        __SCREAMING_SNAKE_CASE = init_std
        __SCREAMING_SNAKE_CASE = use_cache
        # Autoformer
        __SCREAMING_SNAKE_CASE = label_length
        __SCREAMING_SNAKE_CASE = moving_average
        __SCREAMING_SNAKE_CASE = autocorrelation_factor
        super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
    @property
    def UpperCAmelCase__ ( self : List[str] ):
        '''Total width of the per-timestep feature vector fed to the model
        (embeddings + dynamic/time/static-real features + loc/scale).'''
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 109
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ = [
"PLBART_PRETRAINED_MODEL_ARCHIVE_LIST",
"PLBartForCausalLM",
"PLBartForConditionalGeneration",
"PLBartForSequenceClassification",
"PLBartModel",
"PLBartPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
A_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 391
| 0
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any ):
    """Write a README.md model card for one FSMT wmt19 translation pair.

    NOTE(review): this block cannot run as written — the three parameters
    share one mangled name (a SyntaxError; upstream:
    ``(model_card_dir, src_lang, tgt_lang)``), and every local is rebound to
    ``SCREAMING_SNAKE_CASE__`` (shadowing the function) while the card
    template reads the lost originals (``texts``, ``scores``, ``pair``,
    ``src_lang``, ``tgt_lang``, ``readme``, ``path``). Restore the bindings
    before use.
    """
    # Sample sentence per language, used in the usage example below.
    SCREAMING_SNAKE_CASE__ = {
        """en""": """Machine learning is great, isn't it?""",
        """ru""": """Машинное обучение - это здорово, не так ли?""",
        """de""": """Maschinelles Lernen ist großartig, oder?""",
    }
    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    SCREAMING_SNAKE_CASE__ = {
        """ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
        """en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
        """en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
        """de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
    }
    SCREAMING_SNAKE_CASE__ = f'''{src_lang}-{tgt_lang}'''
    # Full markdown card template (YAML front matter + body).
    SCREAMING_SNAKE_CASE__ = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.

For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).

The abbreviation FSMT stands for FairSeqMachineTranslation

All four models are available:

* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}

```

#### Limitations and bias

- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)

## Training data

Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).

## Eval results

pair   | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}

The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking

The score was calculated using this code:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.

## Data Sources

- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)


### BibTeX entry and citation info

```bibtex
@inproceedings{{...,
  year={{2020}},
  title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
  author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
  booktitle={{Proc. of WMT}},
}}
```


## TODO

- port model ensemble (fairseq uses 4 model checkpoints)

'''
    os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
    SCREAMING_SNAKE_CASE__ = os.path.join(UpperCamelCase__ , """README.md""" )
    print(f'''Generating {path}''' )
    with open(UpperCamelCase__ , """w""" , encoding="""utf-8""" ) as f:
        f.write(UpperCamelCase__ )
# make sure we are under the root of the project
# NOTE(review): both path bindings below share the mangled name
# ``_lowerCamelCase`` — the reads of ``repo_dir`` and the unpack of
# ``model_name.split('-')`` into three distinct names were lost.
_lowerCamelCase = Path(__file__).resolve().parent.parent.parent
_lowerCamelCase = repo_dir / 'model_cards'
# Generate one card per supported wmt19 translation direction.
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = model_name.split('-')
    _lowerCamelCase = model_cards_dir / 'facebook' / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 59
|
import warnings
from functools import wraps
from typing import Callable
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: Callable ):
    """Decorator that marks *UpperCamelCase__* as experimental API.

    Every call to the wrapped callable emits a warning, then the call is
    forwarded unchanged (same arguments, same return value).

    Fixed: the original declared ``*args`` and ``**kwargs`` under one shared
    name (a SyntaxError) and passed that name — not a warning category — as
    the second argument to ``warnings.warn``. UserWarning restored here
    (presumably the upstream category — confirm against the original helper).
    """
    fn = UpperCamelCase__
    @wraps(fn )
    def _inner_fn(*args , **kwargs ):
        warnings.warn(
            (f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , UserWarning , )
        return fn(*args , **kwargs )
    return _inner_fn
| 59
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
# NOTE(review): all five module constants below were mangled to the single
# name ``SCREAMING_SNAKE_CASE``, so each rebinding clobbers the previous one
# and only the final ignore-set survives; upstream these are
# PATH_TO_TRANSFORMERS, transformers, CONFIG_MAPPING, _re_checkpoint, and
# CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK.
SCREAMING_SNAKE_CASE = 'src/transformers'
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE = direct_transformers_import(PATH_TO_TRANSFORMERS)
SCREAMING_SNAKE_CASE = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
SCREAMING_SNAKE_CASE = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
# Config classes exempt from the docstring-checkpoint requirement.
SCREAMING_SNAKE_CASE = {
    'DecisionTransformerConfig',
    'EncoderDecoderConfig',
    'MusicgenConfig',
    'RagConfig',
    'SpeechEncoderDecoderConfig',
    'TimmBackboneConfig',
    'VisionEncoderDecoderConfig',
    'VisionTextDualEncoderConfig',
    'LlamaConfig',
}
def _lowerCamelCase ( config_class ):
    """Return the first valid checkpoint name advertised in ``config_class``'s source.

    A checkpoint is "valid" when the markdown link in the docstring points at
    ``https://huggingface.co/<checkpoint-name>``. Returns ``None`` when no
    such link exists.

    Fixes vs. the previous revision: restored the local names (everything was
    assigned to one throwaway name while later lines read ``checkpoints``
    etc., raising ``NameError``) and the regex is applied to the *source
    text*, not the class object.
    """
    checkpoint = None
    # source code of `config_class`
    source_code = inspect.getsource(config_class )
    checkpoints = _re_checkpoint.findall(source_code )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/''' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint


# Readable alias: sibling code in this file calls the function by this name.
get_checkpoint_from_config_class = _lowerCamelCase
def _lowerCamelCase ( ):
    """Fail loudly when a non-deprecated config class lacks a valid checkpoint link.

    Iterates every config class registered in ``CONFIG_MAPPING``; collects the
    names of classes whose docstring advertises no valid checkpoint and raises
    ``ValueError`` listing them, unless they are explicitly ignored.

    Fixes vs. the previous revision: restored the mangled local names, which
    previously raised ``NameError`` (``get_checkpoint_from_config_class`` was
    called with an undefined ``__A`` and the result list was read under a name
    that was never bound).
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class )
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name )
    if len(configs_without_checkpoint ) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint ) )
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )


# Readable alias so the `__main__` guard below resolves.
check_config_docstrings_have_checkpoints = _lowerCamelCase

if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
| 485
|
def _lowerCamelCase ( sequence: list ) -> list:
    """Sort a list of non-negative integers in place using bead (gravity) sort.

    Each pass lets "beads" fall between adjacent rods; after ``len(sequence)``
    passes the list is ascending. Returns the same (mutated) list.

    Raises:
        TypeError: if any element is not a non-negative integer.

    Fixes vs. the previous revision: the validation read
    ``isinstance(__A, __A)`` and the body read ``sequence`` while the
    parameter had been mangled to ``__A`` (NameError), and the ``__main__``
    guard referenced the undefined name ``bead_sort``.
    """
    if any(not isinstance(x , int ) or x < 0 for x in sequence ):
        raise TypeError('''Sequence must be list of non-negative integers''' )
    for _ in range(len(sequence ) ):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence , sequence[1:] ) ):
            # Beads "fall" from the taller rod onto the shorter one.
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


# Public alias used by the self-test below.
bead_sort = _lowerCamelCase

if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 485
| 1
|
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
# Bit depth per colour channel used by the converters below (mangled name;
# the original constant was called `BITS`).
snake_case_ = 8
def A__ ( x , bits=snake_case_ ):
    """Convert an image tensor in [0, 1] to a per-channel bit tensor in {-1, 1}.

    Each channel value is scaled to an 8-bit integer and expanded into ``bits``
    binary planes (most-significant bit first), then mapped from {0, 1} to
    {-1, 1}.

    Fixes vs. the previous revision: the parameter list used one duplicated
    mangled name (a SyntaxError) and the default referenced the undefined name
    ``BITS``; the module constant above (mangled to ``snake_case_``) is used
    instead.
    """
    device = x.device
    x = (x * 2_5_5).int().clamp(0 , 2_5_5 )
    # Per-bit weights, most-significant bit first: [2^(bits-1), ..., 2, 1].
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b c h w -> b c 1 h w''' )
    # Broadcasted bitwise-AND extracts each bit plane as 0/1 floats.
    x = ((x & mask) != 0).float()
    x = rearrange(x , '''b c d h w -> b (c d) h w''' )
    x = x * 2 - 1
    return x


# Readable alias; the pipeline below calls the function by this name.
decimal_to_bits = A__
def A__ ( x , bits=snake_case_ ):
    """Inverse of ``decimal_to_bits``: collapse bit planes back to [0, 1] values.

    Bits are thresholded at 0, weighted by powers of two (MSB first, ``d=8``
    planes per channel as produced by the encoder), summed per channel, and
    rescaled from [0, 255] to [0, 1].

    Fixes vs. the previous revision: duplicated mangled parameter names
    (SyntaxError), the undefined default ``BITS``, and the non-existent dtype
    ``torch.intaa`` (digit-mangled ``torch.int32``).
    """
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1 , -1 , -1 , device=device , dtype=torch.int32 )
    mask = rearrange(mask , '''d -> d 1 1''' )
    x = rearrange(x , '''b (c d) h w -> b c d h w''' , d=8 )
    dec = reduce(x * mask , '''b c d h w -> b c h w''' , '''sum''' )
    return (dec / 2_5_5).clamp(0.0 , 1.0 )


# Readable alias; the pipeline below calls the function by this name.
bits_to_decimal = A__
def A__ (
    self ,
    model_output ,
    timestep ,
    sample ,
    eta = 0.0 ,
    use_clipped_model_output = True ,
    generator=None ,
    return_dict = True ,
):
    """DDIM denoising step adapted for bit diffusion.

    Identical to the standard DDIM step except that the predicted ``x_0`` is
    clipped to ``±self.bit_scale`` instead of ±1. ``self`` is the scheduler
    instance. Parameter names are restored from the variable reads in the
    body; the previous revision had one duplicated mangled parameter name
    (a SyntaxError) and never bound any of the locals it read.
    """
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0" to the bit range instead of ±1.
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else '''cpu'''
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )


# Readable alias; the pipeline below selects the step function by this name.
ddim_bit_scheduler_step = A__
def A__ (
    self ,
    model_output ,
    timestep ,
    sample ,
    prediction_type="epsilon" ,
    generator=None ,
    return_dict = True ,
):
    """DDPM denoising step adapted for bit diffusion.

    Identical to the standard DDPM step except that the predicted ``x_0`` is
    clipped to ``±self.bit_scale``. ``self`` is the scheduler instance.
    Parameter names are restored from the variable reads in the body; the
    previous revision had duplicated mangled parameter names (a SyntaxError)
    and never bound the locals it read.
    """
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output , sample.shape[1] , dim=1 )
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(F"Unsupported prediction_type {prediction_type}." )
    # 3. Clip "predicted x_0" to the bit range instead of ±1.
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=generator ).to(model_output.device )
        variance = (self._get_variance(t , predicted_variance=predicted_variance ) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample , pred_original_sample=pred_original_sample )


# Readable alias; the pipeline below selects the step function by this name.
ddpm_bit_scheduler_step = A__
class snake_case_ ( DiffusionPipeline ):
    """Bit Diffusion pipeline ("Analog Bits"): diffusion in a ±1 bit space.

    Latents start as Gaussian noise, are converted to the bit representation
    (scaled by ``bit_scale``), denoised with the UNet plus a bit-aware
    scheduler step, then decoded back to [0, 1] pixels via ``bits_to_decimal``.

    Fixes vs. the previous revision: the base class was the undefined name
    ``_A`` (restored to the imported ``DiffusionPipeline``) and the
    constructor/``__call__`` parameter lists used duplicated mangled names
    (a SyntaxError); names are restored from the reads in the method bodies.
    """

    def __init__( self , unet: UNetaDConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 , ) -> None:
        super().__init__()
        self.bit_scale = bit_scale
        # Route the scheduler's step through the bit-aware variant matching
        # its type. NOTE(review): the mangled source assigned this selection
        # to a throwaway local; assigning to `scheduler.step` mirrors the
        # upstream community pipeline — confirm against upstream.
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self , height = 2_5_6 , width = 2_5_6 , num_inference_steps = 5_0 , generator = None , batch_size = 1 , output_type = "pil" , return_dict = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        # Initial noise in image space, then encoded to the ±bit_scale bit space.
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        # Decode bits back to [0, 1] pixel values.
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 717
|
from __future__ import annotations
def A__ ( nums ) -> bool:
    """Return True iff side lengths ``nums`` can close into a polygon.

    The longest side must be strictly shorter than the sum of all the others.

    Raises:
        ValueError: fewer than 2 values, or any non-positive value.

    Fixes vs. the previous revision: the parameter had been mangled away
    while the body still read ``nums``, so every call raised ``NameError``.
    """
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 262
| 0
|
'''simple docstring'''
def a_ ( arr ):
    """Sort ``arr`` in place with stooge sort and return it.

    Fixes vs. the previous revision: both functions had been mangled to the
    same name (the second definition shadowed the first), the recursive calls
    and the ``__main__`` guard referenced undefined names, and the element
    swap assigned both values to a single throwaway local instead of swapping,
    so nothing was ever sorted.
    """
    stooge(arr , 0 , len(arr ) - 1 )
    return arr


# Public alias used by the `__main__` guard below.
stooge_sort = a_


def stooge ( arr , i , h ):
    """Recursively stooge-sort ``arr[i..h]`` in place."""
    if i >= h:
        return
    # If the first element is larger than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr , i , h - t )
        # Recursively sort last 2/3 elements
        stooge(arr , i + t , h )
        # Recursively sort first 2/3 elements again
        stooge(arr , i , h - t )


if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
    print(stooge_sort(unsorted))
| 676
|
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (name mangled; upstream calls this `logger`).
lowercase__ = logging.get_logger(__name__)
# Map of pretrained identifiers to hosted config URLs.
# NOTE(review): this dict is bound to the SAME mangled name as the logger
# above, so it shadows it — the original distinct names appear lost.
lowercase__ = {
    '''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ ):
    """Configuration for Data2Vec-Audio style models.

    Holds hyper-parameters for the convolutional feature encoder, the
    transformer encoder, SpecAugment masking, the CTC head, the optional
    adapter, and the classification / x-vector heads.

    NOTE(review): this class is machine-mangled and does not currently run:
    every ``__init__`` parameter shares the name ``UpperCAmelCase_``
    (duplicate argument names are a SyntaxError), the body reads names
    (``hidden_size``, ``conv_dim``, ...) that are never bound, and the
    assignments bind a throwaway local (``snake_case_``) instead of ``self``
    attributes. Restore the original parameter/attribute names from upstream
    before use.
    """

    # `model_type` identifier (attribute name mangled).
    snake_case = """data2vec-audio"""

    def __init__( self , UpperCAmelCase_=32 , UpperCAmelCase_=7_68 , UpperCAmelCase_=12 , UpperCAmelCase_=12 , UpperCAmelCase_=30_72 , UpperCAmelCase_="gelu" , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.0 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.1 , UpperCAmelCase_=0.02 , UpperCAmelCase_=1e-5 , UpperCAmelCase_="gelu" , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , UpperCAmelCase_=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_=False , UpperCAmelCase_=16 , UpperCAmelCase_=19 , UpperCAmelCase_=5 , UpperCAmelCase_=0.05 , UpperCAmelCase_=10 , UpperCAmelCase_=2 , UpperCAmelCase_=0.0 , UpperCAmelCase_=10 , UpperCAmelCase_=0 , UpperCAmelCase_="sum" , UpperCAmelCase_=False , UpperCAmelCase_=False , UpperCAmelCase_=2_56 , UpperCAmelCase_=(5_12, 5_12, 5_12, 5_12, 15_00) , UpperCAmelCase_=(5, 3, 3, 1, 1) , UpperCAmelCase_=(1, 2, 3, 1, 1) , UpperCAmelCase_=5_12 , UpperCAmelCase_=0 , UpperCAmelCase_=1 , UpperCAmelCase_=2 , UpperCAmelCase_=False , UpperCAmelCase_=3 , UpperCAmelCase_=2 , UpperCAmelCase_=3 , UpperCAmelCase_=None , **UpperCAmelCase_ , ):
        super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ )
        # Transformer / feature-encoder hyper-parameters.
        snake_case_ = hidden_size
        snake_case_ = feat_extract_activation
        # Conv feature-encoder layout (dims / strides / kernels, one per layer).
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = conv_bias
        snake_case_ = num_conv_pos_embeddings
        snake_case_ = num_conv_pos_embedding_groups
        snake_case_ = conv_pos_kernel_size
        snake_case_ = len(self.conv_dim )
        snake_case_ = num_hidden_layers
        snake_case_ = intermediate_size
        snake_case_ = hidden_act
        snake_case_ = num_attention_heads
        snake_case_ = hidden_dropout
        snake_case_ = attention_dropout
        snake_case_ = activation_dropout
        snake_case_ = feat_proj_dropout
        snake_case_ = final_dropout
        snake_case_ = layerdrop
        snake_case_ = layer_norm_eps
        snake_case_ = initializer_range
        snake_case_ = vocab_size
        snake_case_ = use_weighted_layer_sum
        # The three conv descriptions must agree on the number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                f''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        snake_case_ = mask_time_prob
        snake_case_ = mask_time_length
        snake_case_ = mask_time_min_masks
        snake_case_ = mask_feature_prob
        snake_case_ = mask_feature_length
        snake_case_ = mask_feature_min_masks
        # ctc loss
        snake_case_ = ctc_loss_reduction
        snake_case_ = ctc_zero_infinity
        # adapter
        snake_case_ = add_adapter
        snake_case_ = adapter_kernel_size
        snake_case_ = adapter_stride
        snake_case_ = num_adapter_layers
        snake_case_ = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        snake_case_ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = list(UpperCAmelCase_ )
        snake_case_ = xvector_output_dim

    @property
    def _lowercase ( self ):
        # Total stride of the conv feature encoder (input frames per output frame).
        return math.prod(self.conv_stride )
| 508
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger (name mangled; upstream calls this `logger`).
UpperCamelCase = logging.get_logger(__name__)
# PIL is an optional dependency; import it only when the vision extra is installed.
if is_vision_available():
    import PIL
class lowerCAmelCase_ ( UpperCamelCase_ ):
    """CLIP-style image processor: resize → center-crop → rescale → normalize → RGB.

    NOTE(review): this class is machine-mangled and does not currently parse
    or run: every method parameter shares the name ``lowerCamelCase__``
    (duplicate argument names are a SyntaxError), method bodies reference an
    undefined ``__a``, and all five transform methods were renamed to the
    same ``__a`` so each definition shadows the previous one — the
    ``self.resize`` / ``self.center_crop`` / ... lookups in the preprocess
    body cannot resolve. Restore the names from upstream before use.
    """

    # Keys this processor produces in its BatchFeature output.
    _snake_case : List[Any] = ["""pixel_values"""]

    def __init__( self :Optional[int] , lowerCamelCase__ :int = True , lowerCamelCase__ :List[Any] = None , lowerCamelCase__ :List[Any] = PILImageResampling.BICUBIC , lowerCamelCase__ :Optional[Any] = True , lowerCamelCase__ :Tuple = None , lowerCamelCase__ :List[str] = True , lowerCamelCase__ :Union[str, Any] = 1 / 2_55 , lowerCamelCase__ :Any = True , lowerCamelCase__ :str = None , lowerCamelCase__ :List[Any] = None , lowerCamelCase__ :Dict = True , **lowerCamelCase__ :List[Any] , ):
        super().__init__(**__a )
        # Defaults: shortest-edge-224 resize and 224x224 center crop.
        UpperCamelCase__ :int = size if size is not None else {'shortest_edge': 2_24}
        UpperCamelCase__ :str = get_size_dict(__a , default_to_square=__a )
        UpperCamelCase__ :Tuple = crop_size if crop_size is not None else {'height': 2_24, 'width': 2_24}
        UpperCamelCase__ :str = get_size_dict(__a , default_to_square=__a , param_name="""crop_size""" )
        UpperCamelCase__ :Any = do_resize
        UpperCamelCase__ :Optional[int] = size
        UpperCamelCase__ :int = resample
        UpperCamelCase__ :List[Any] = do_center_crop
        UpperCamelCase__ :Tuple = crop_size
        UpperCamelCase__ :Union[str, Any] = do_rescale
        UpperCamelCase__ :int = rescale_factor
        UpperCamelCase__ :Tuple = do_normalize
        UpperCamelCase__ :Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        UpperCamelCase__ :Any = image_std if image_std is not None else OPENAI_CLIP_STD
        UpperCamelCase__ :int = do_convert_rgb

    # Resize so the shortest edge matches `size["shortest_edge"]`.
    def __a ( self :Optional[int] , lowerCamelCase__ :Union[str, Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :int = PILImageResampling.BICUBIC , lowerCamelCase__ :Optional[int] = None , **lowerCamelCase__ :Dict , ):
        UpperCamelCase__ :List[str] = get_size_dict(__a , default_to_square=__a )
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        UpperCamelCase__ :str = get_resize_output_image_size(__a , size=size["""shortest_edge"""] , default_to_square=__a )
        return resize(__a , size=__a , resample=__a , data_format=__a , **__a )

    # Center-crop to `size["height"] x size["width"]`.
    def __a ( self :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Any , lowerCamelCase__ :List[str] = None , **lowerCamelCase__ :Union[str, Any] , ):
        UpperCamelCase__ :Union[str, Any] = get_size_dict(__a )
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(__a , size=(size["""height"""], size["""width"""]) , data_format=__a , **__a )

    # Multiply pixel values by a scale factor (e.g. 1/255).
    def __a ( self :Dict , lowerCamelCase__ :str , lowerCamelCase__ :Tuple , lowerCamelCase__ :Union[str, Any] = None , **lowerCamelCase__ :Optional[int] , ):
        return rescale(__a , scale=__a , data_format=__a , **__a )

    # Normalize with per-channel mean/std.
    def __a ( self :List[Any] , lowerCamelCase__ :Any , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Dict , lowerCamelCase__ :Optional[Any] = None , **lowerCamelCase__ :Any , ):
        return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )

    # Full preprocessing pipeline; returns a BatchFeature of pixel_values.
    def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :Tuple = None , lowerCamelCase__ :Tuple = None , lowerCamelCase__ :Dict = None , lowerCamelCase__ :List[str] = None , lowerCamelCase__ :str = None , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :int = None , lowerCamelCase__ :Union[str, Any] = None , lowerCamelCase__ :Tuple = None , lowerCamelCase__ :int = None , lowerCamelCase__ :Optional[int] = None , lowerCamelCase__ :Optional[Any] = ChannelDimension.FIRST , **lowerCamelCase__ :str , ):
        # Per-call overrides fall back to the instance defaults.
        UpperCamelCase__ :Optional[Any] = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase__ :List[Any] = size if size is not None else self.size
        UpperCamelCase__ :Any = get_size_dict(__a , param_name="""size""" , default_to_square=__a )
        UpperCamelCase__ :Optional[int] = resample if resample is not None else self.resample
        UpperCamelCase__ :Any = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCamelCase__ :str = crop_size if crop_size is not None else self.crop_size
        UpperCamelCase__ :Optional[int] = get_size_dict(__a , param_name="""crop_size""" , default_to_square=__a )
        UpperCamelCase__ :Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
        UpperCamelCase__ :Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCamelCase__ :Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
        UpperCamelCase__ :Tuple = image_mean if image_mean is not None else self.image_mean
        UpperCamelCase__ :Union[str, Any] = image_std if image_std is not None else self.image_std
        UpperCamelCase__ :Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        UpperCamelCase__ :Any = make_list_of_images(__a )
        if not valid_images(__a ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            UpperCamelCase__ :str = [convert_to_rgb(__a ) for image in images]
        # All transformations expect numpy arrays.
        UpperCamelCase__ :List[Any] = [to_numpy_array(__a ) for image in images]
        if do_resize:
            UpperCamelCase__ :Union[str, Any] = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
        if do_center_crop:
            UpperCamelCase__ :Union[str, Any] = [self.center_crop(image=__a , size=__a ) for image in images]
        if do_rescale:
            UpperCamelCase__ :Any = [self.rescale(image=__a , scale=__a ) for image in images]
        if do_normalize:
            UpperCamelCase__ :Optional[int] = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
        UpperCamelCase__ :int = [to_channel_dimension_format(__a , __a ) for image in images]
        UpperCamelCase__ :Tuple = {'pixel_values': images}
        return BatchFeature(data=__a , tensor_type=__a )
| 717
|
from ...configuration_utils import PretrainedConfig
# Map of pretrained TAPAS checkpoints to their hosted config.json URLs
# (constant name mangled; upstream calls this TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP).
UpperCamelCase = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}
class lowerCAmelCase_ ( lowercase ):
    """Configuration for TAPAS (table question answering) models.

    NOTE(review): this class is machine-mangled and does not currently run:
    every ``__init__`` parameter shares the name ``lowerCamelCase__``
    (duplicate argument names are a SyntaxError), and the body reads names
    (``vocab_size``, ``hidden_size``, ...) that are never bound, assigning
    them to a throwaway local instead of ``self`` attributes. Restore the
    original parameter/attribute names from upstream before use.
    """

    # `model_type` identifier (attribute name mangled).
    _snake_case : Dict = """tapas"""

    def __init__( self :List[Any] , lowerCamelCase__ :List[str]=3_05_22 , lowerCamelCase__ :str=7_68 , lowerCamelCase__ :List[Any]=12 , lowerCamelCase__ :Any=12 , lowerCamelCase__ :Tuple=30_72 , lowerCamelCase__ :int="gelu" , lowerCamelCase__ :Dict=0.1 , lowerCamelCase__ :str=0.1 , lowerCamelCase__ :List[str]=10_24 , lowerCamelCase__ :List[Any]=[3, 2_56, 2_56, 2, 2_56, 2_56, 10] , lowerCamelCase__ :Tuple=0.02 , lowerCamelCase__ :str=1e-12 , lowerCamelCase__ :str=0 , lowerCamelCase__ :Optional[int]=10.0 , lowerCamelCase__ :int=0 , lowerCamelCase__ :Dict=1.0 , lowerCamelCase__ :Union[str, Any]=None , lowerCamelCase__ :Optional[int]=1.0 , lowerCamelCase__ :List[Any]=False , lowerCamelCase__ :Any=None , lowerCamelCase__ :Optional[int]=1.0 , lowerCamelCase__ :Union[str, Any]=1.0 , lowerCamelCase__ :Optional[int]=False , lowerCamelCase__ :List[Any]=False , lowerCamelCase__ :Any="ratio" , lowerCamelCase__ :int=None , lowerCamelCase__ :Union[str, Any]=None , lowerCamelCase__ :int=64 , lowerCamelCase__ :int=32 , lowerCamelCase__ :List[str]=False , lowerCamelCase__ :Optional[int]=True , lowerCamelCase__ :str=False , lowerCamelCase__ :int=False , lowerCamelCase__ :Optional[Any]=True , lowerCamelCase__ :Optional[Any]=False , lowerCamelCase__ :str=None , lowerCamelCase__ :List[Any]=None , **lowerCamelCase__ :Optional[Any] , ):
        super().__init__(pad_token_id=lowerCamelCase__ , **lowerCamelCase__ )
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        UpperCamelCase__ :List[Any] = vocab_size
        UpperCamelCase__ :Optional[int] = hidden_size
        UpperCamelCase__ :Any = num_hidden_layers
        UpperCamelCase__ :str = num_attention_heads
        UpperCamelCase__ :Dict = hidden_act
        UpperCamelCase__ :Tuple = intermediate_size
        UpperCamelCase__ :int = hidden_dropout_prob
        UpperCamelCase__ :List[str] = attention_probs_dropout_prob
        UpperCamelCase__ :Any = max_position_embeddings
        UpperCamelCase__ :List[Any] = type_vocab_sizes
        UpperCamelCase__ :List[Any] = initializer_range
        UpperCamelCase__ :List[str] = layer_norm_eps
        # Fine-tuning task hyperparameters
        UpperCamelCase__ :List[str] = positive_label_weight
        UpperCamelCase__ :int = num_aggregation_labels
        UpperCamelCase__ :str = aggregation_loss_weight
        UpperCamelCase__ :Optional[Any] = use_answer_as_supervision
        UpperCamelCase__ :Tuple = answer_loss_importance
        UpperCamelCase__ :Dict = use_normalized_answer_loss
        UpperCamelCase__ :Optional[Any] = huber_loss_delta
        UpperCamelCase__ :Any = temperature
        UpperCamelCase__ :Union[str, Any] = aggregation_temperature
        UpperCamelCase__ :Tuple = use_gumbel_for_cells
        UpperCamelCase__ :Tuple = use_gumbel_for_aggregation
        UpperCamelCase__ :Optional[int] = average_approximation_function
        UpperCamelCase__ :Optional[Any] = cell_selection_preference
        UpperCamelCase__ :Any = answer_loss_cutoff
        UpperCamelCase__ :Dict = max_num_rows
        UpperCamelCase__ :Optional[int] = max_num_columns
        UpperCamelCase__ :Tuple = average_logits_per_cell
        UpperCamelCase__ :Any = select_one_column
        UpperCamelCase__ :Dict = allow_empty_column_selection
        UpperCamelCase__ :Union[str, Any] = init_cell_selection_weights_to_zero
        UpperCamelCase__ :Optional[Any] = reset_position_index_per_cell
        UpperCamelCase__ :List[str] = disable_per_token_loss
        # Aggregation hyperparameters
        UpperCamelCase__ :Tuple = aggregation_labels
        UpperCamelCase__ :str = no_aggregation_label_index
        # JSON deserialisation leaves dict keys as strings; convert back to ints.
        if isinstance(self.aggregation_labels , lowerCamelCase__ ):
            UpperCamelCase__ :Optional[Any] = {int(lowerCamelCase__ ): v for k, v in aggregation_labels.items()}
| 383
| 0
|
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
# NOTE(review): all six module constants below were mangled to the single name
# `lowerCAmelCase`, so each assignment shadows the previous one and only the
# last value survives; the inline comments also appear shifted one line above
# the value they describe. The original distinct names must be restored from
# upstream for these test fixtures to be usable.
lowerCAmelCase = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCAmelCase = """main"""
# Default branch name
lowerCAmelCase = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)
lowerCAmelCase = """aaaaaaa"""
# This commit does not exist, so we should 404.
lowerCAmelCase = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCAmelCase = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""
@contextlib.contextmanager
def lowerCamelCase_ ( ):
    """Context manager printing 'Welcome!' on enter and 'Bye!' on normal exit.

    Note: no try/finally, so 'Bye!' is skipped if the body raises (original
    behavior, preserved).
    """
    print('Welcome!' )
    yield
    print('Bye!' )


# Readable alias; the test class below calls the manager by this name, which
# was previously undefined (the def name had been mangled).
context_en = lowerCamelCase_
@contextlib.contextmanager
def lowerCamelCase_ ( ):
    """Context manager printing 'Bonjour!' on enter and 'Au revoir!' on normal exit.

    Note: no try/finally, so 'Au revoir!' is skipped if the body raises
    (original behavior, preserved).
    """
    print('Bonjour!' )
    yield
    print('Au revoir!' )


# Readable alias; the test class below calls the manager by this name, which
# was previously undefined (the def name had been mangled).
context_fr = lowerCamelCase_
class lowerCAmelCase_ ( unittest.TestCase ):
    """Sanity check that the `transformers` package is dynamically importable."""

    def UpperCamelCase ( self )-> Optional[int]:
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec('transformers' ) is not None
class lowerCAmelCase_ ( unittest.TestCase ):
    """Tests for `ContextManagers` stdout composition and `find_labels`.

    NOTE(review): these tests are machine-mangled and cannot pass as written:
    the patched-stdout parameter was renamed to `_UpperCamelCase` while the
    bodies still read `mock_stdout` (NameError); the same mangled name is
    passed to `find_labels` where the original passed concrete model classes
    (e.g. BertForPreTraining, TFBert..., FlaxBert...); and all test methods
    share the name `UpperCamelCase`, so later definitions shadow earlier ones.
    Restore the method/argument names from upstream.
    """

    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def UpperCamelCase ( self , _UpperCamelCase )-> Any:
        # No context managers: output passes through untouched.
        with ContextManagers([] ):
            print('Transformers are awesome!' )
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue() , 'Transformers are awesome!\n' )

    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def UpperCamelCase ( self , _UpperCamelCase )-> Optional[int]:
        with ContextManagers([context_en()] ):
            print('Transformers are awesome!' )
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , 'Welcome!\nTransformers are awesome!\nBye!\n' )

    @unittest.mock.patch('sys.stdout' , new_callable=io.StringIO )
    def UpperCamelCase ( self , _UpperCamelCase )-> str:
        # Managers are entered left-to-right, so the French wrapper is outermost.
        with ContextManagers([context_fr(), context_en()] ):
            print('Transformers are awesome!' )
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue() , 'Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n' )

    @require_torch
    def UpperCamelCase ( self )-> Dict:
        self.assertEqual(find_labels(_UpperCamelCase ) , ['labels'] )
        self.assertEqual(find_labels(_UpperCamelCase ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(_UpperCamelCase ) , ['start_positions', 'end_positions'] )

        class lowerCAmelCase_ ( UpperCAmelCase ):
            pass

        self.assertEqual(find_labels(_UpperCamelCase ) , ['labels'] )

    @require_tf
    def UpperCamelCase ( self )-> Optional[int]:
        self.assertEqual(find_labels(_UpperCamelCase ) , ['labels'] )
        self.assertEqual(find_labels(_UpperCamelCase ) , ['labels', 'next_sentence_label'] )
        self.assertEqual(find_labels(_UpperCamelCase ) , ['start_positions', 'end_positions'] )

        class lowerCAmelCase_ ( UpperCAmelCase ):
            pass

        self.assertEqual(find_labels(_UpperCamelCase ) , ['labels'] )

    @require_flax
    def UpperCamelCase ( self )-> List[Any]:
        # Flax models don't have labels
        self.assertEqual(find_labels(_UpperCamelCase ) , [] )
        self.assertEqual(find_labels(_UpperCamelCase ) , [] )
        self.assertEqual(find_labels(_UpperCamelCase ) , [] )

        class lowerCAmelCase_ ( UpperCAmelCase ):
            pass

        self.assertEqual(find_labels(_UpperCamelCase ) , [] )
| 292
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
    """Holds sizing/config fixtures for Pix2Struct image-processor tests.

    NOTE(review): machine-mangled and non-runnable as-is — every ``__init__``
    parameter shares the name ``_UpperCamelCase`` (duplicate argument names
    are a SyntaxError), the body reads the original names (``parent``,
    ``batch_size``, ...) which are never bound, assignments target a
    throwaway local ``_A`` instead of ``self`` attributes, the two helper
    methods share one mangled name (the second shadows the first), and the
    final ``return raw_image`` reads an unbound name. Restore from upstream.
    """

    def __init__( self , _UpperCamelCase , _UpperCamelCase=7 , _UpperCamelCase=3 , _UpperCamelCase=18 , _UpperCamelCase=30 , _UpperCamelCase=400 , _UpperCamelCase=None , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , )-> Dict:
        _A = size if size is not None else {'height': 20, 'width': 20}
        _A = parent
        _A = batch_size
        _A = num_channels
        _A = image_size
        _A = min_resolution
        _A = max_resolution
        _A = size
        _A = do_normalize
        _A = do_convert_rgb
        # Candidate max_patches values exercised by the tests.
        _A = [512, 1024, 2048, 4096]
        _A = patch_size if patch_size is not None else {'height': 16, 'width': 16}

    def UpperCamelCase ( self )-> Dict:
        # kwargs forwarded to the image processor under test.
        return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}

    def UpperCamelCase ( self )-> Tuple:
        # Download a small sample image for smoke tests (requires network access).
        _A = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
        _A = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert('RGB' )
        return raw_image
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCAmelCase_ ( UpperCAmelCase , unittest.TestCase ):
    # Test suite for Pix2Struct's image processor on 3-channel (RGB) inputs.
    # NOTE(review): this block is scrambled — results are assigned to a
    # throwaway `_A` while later lines read the real names (`image_processor`,
    # `image_inputs`, `expected_hidden_dim`, `encoded_images`, `inputs`), and
    # many call arguments are the unbound placeholder `_UpperCamelCase`.
    # Kept byte-identical; restore the real bindings from the upstream
    # Pix2Struct image-processing tests before running.
    __UpperCAmelCase =PixaStructImageProcessor if is_vision_available() else None
    def UpperCamelCase ( self )-> Optional[int]:
        # setUp-like: build the shared tester fixture (presumably
        # `self.image_processor_tester = ...` originally — the property below reads it).
        _A = PixaStructImageProcessingTester(self )
    @property
    def UpperCamelCase ( self )-> Tuple:
        # Processor kwargs used to instantiate the class under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase ( self )-> List[Any]:
        # The processor should expose its configuration attributes.
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) )
    def UpperCamelCase ( self )-> Any:
        # Expected-value test: mean of the flattened patches on a reference image.
        _A = self.image_processor_tester.prepare_dummy_image()
        _A = self.image_processing_class(**self.image_processor_dict )
        _A = 2048
        _A = image_processor(_UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase )
        self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
    def UpperCamelCase ( self )-> int:
        # PIL inputs: check output shapes for single and batched images.
        # Initialize image_processor
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )
        # Test not batched input
        # Hidden dim = patch area * channels + 2 (row/col index features).
        _A = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _A = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _A = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def UpperCamelCase ( self )-> List[str]:
        # VQA mode: calling without `header_text` must raise; with it, shapes match.
        # Initialize image_processor
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )
        # Test not batched input
        _A = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2
        _A = True
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            with self.assertRaises(_UpperCamelCase ):
                _A = image_processor(
                    image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            _A = 'Hello'
            _A = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _A = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase , header_text=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def UpperCamelCase ( self )-> Optional[int]:
        # numpy inputs: same shape checks as the PIL case.
        # Initialize image_processor
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , numpify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , np.ndarray )
        _A = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _A = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _A = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
    def UpperCamelCase ( self )-> Union[str, Any]:
        # torch tensor inputs: same shape checks as the PIL case.
        # Initialize image_processor
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase , torchify=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , torch.Tensor )
        # Test not batched input
        _A = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * self.image_processor_tester.num_channels
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _A = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _A = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
    not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class lowerCAmelCase_ ( UpperCAmelCase , unittest.TestCase ):
    # Variant of the suite above for 4-channel (e.g. RGBA) inputs — the
    # processor converts to RGB, so the expected hidden dim uses channels - 1.
    # NOTE(review): scrambled like its sibling — `_A` throwaway targets,
    # unbound `_UpperCamelCase` arguments, and every method named
    # `UpperCamelCase` (later defs shadow earlier ones). Kept byte-identical.
    __UpperCAmelCase =PixaStructImageProcessor if is_vision_available() else None
    def UpperCamelCase ( self )-> str:
        # setUp-like: 4-channel tester fixture; the literal 3 is presumably the
        # expected number of encoded channels after RGB conversion — confirm.
        _A = PixaStructImageProcessingTester(self , num_channels=4 )
        _A = 3
    @property
    def UpperCamelCase ( self )-> str:
        # Processor kwargs used to instantiate the class under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase ( self )-> Any:
        # The processor should expose its configuration attributes.
        _A = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_normalize' ) )
        self.assertTrue(hasattr(_UpperCamelCase , 'do_convert_rgb' ) )
    def UpperCamelCase ( self )-> Optional[Any]:
        # 4-channel PIL inputs: shapes computed with (num_channels - 1) after RGB conversion.
        # Initialize image_processor
        _A = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        _A = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCamelCase )
        for image in image_inputs:
            self.assertIsInstance(_UpperCamelCase , Image.Image )
        # Test not batched input
        _A = (
            (self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
            * (self.image_processor_tester.num_channels - 1)
        ) + 2
        for max_patch in self.image_processor_tester.max_patches:
            # Test not batched input
            _A = image_processor(
                image_inputs[0] , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
            # Test batched
            _A = image_processor(
                _UpperCamelCase , return_tensors='pt' , max_patches=_UpperCamelCase ).flattened_patches
            self.assertEqual(
                encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 292
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCamelCase = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 112
|
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def __lowercase ( lowerCamelCase_ : int ):
    """
    Return the Möbius function mu(n) for a positive integer n:
      *  0 if n has a squared prime factor,
      *  1 if n is square-free with an even number of prime factors,
      * -1 if n is square-free with an odd number of prime factors.

    Fix over the original block: the square-free check and the parity test were
    applied to the integer argument itself (``is_square_free(n)`` / ``len(n)``,
    which raises TypeError) instead of to the computed prime-factor list.
    """
    factors = prime_factors(lowerCamelCase_)
    if is_square_free(factors):
        # mu(n) = (-1)^k for square-free n with k prime factors.
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 112
| 1
|
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
UpperCamelCase = logging.getLogger(__name__)
def A(lowercase__):
    """
    Write metadata about the current git checkout (repo id, HEAD sha, active
    branch) to ``<lowercase__>/git_log.json`` so a training run can be traced
    back to the exact code it ran with.

    Fix over the original block: the body used the unbound placeholder
    ``SCREAMING_SNAKE_CASE__`` for four *different* values (the
    ``search_parent_directories`` flag, the repo object, the output folder and
    both ``json.dump`` arguments); each is restored from the surrounding reads.
    The bogus return annotation (evaluated at def time against undefined
    typing names) is dropped.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(lowercase__, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def A ( lowercase__ : Tuple ) -> Dict:
    # NOTE(review): scrambled beyond a safe rewrite. The body reads `params`
    # (never bound — the parameter is `lowercase__`), and every
    # `UpperCamelCase__ : ... = value` statement below was evidently once an
    # attribute write on `params` (local_rank / world_size / n_gpu_per_node /
    # global_rank / n_nodes / node_id / multi_gpu / is_master / multi_node, ...)
    # whose target was lost. As written the function raises NameError on the
    # first `params` access. Intended behavior (confirm against the upstream
    # distillation `utils.py`): derive and sanity-check the distributed
    # topology from environment variables, pick the CUDA device, and initialise
    # the NCCL process group for multi-GPU runs.
    if params.n_gpu <= 0:
        # CPU-only run: zero out the distributed fields and return early.
        UpperCamelCase__ :List[Any] = 0
        UpperCamelCase__ :Optional[int] = -1
        UpperCamelCase__ :Dict = True
        UpperCamelCase__ :List[Any] = False
        return
    assert torch.cuda.is_available()
    logger.info("""Initializing GPUs""" )
    if params.n_gpu > 1:
        # Multi-GPU / possibly multi-node: topology comes from the launcher env.
        assert params.local_rank != -1
        UpperCamelCase__ :Optional[int] = int(os.environ["""WORLD_SIZE"""] )
        UpperCamelCase__ :Optional[Any] = int(os.environ["""N_GPU_NODE"""] )
        UpperCamelCase__ :Optional[int] = int(os.environ["""RANK"""] )
        # number of nodes / node ID
        UpperCamelCase__ :Optional[int] = params.world_size // params.n_gpu_per_node
        UpperCamelCase__ :Any = params.global_rank // params.n_gpu_per_node
        UpperCamelCase__ :Optional[int] = True
        assert params.n_nodes == int(os.environ["""N_NODES"""] )
        assert params.node_id == int(os.environ["""NODE_RANK"""] )
    # local job (single GPU)
    else:
        assert params.local_rank == -1
        UpperCamelCase__ :Optional[Any] = 1
        UpperCamelCase__ :Optional[Any] = 0
        UpperCamelCase__ :int = 0
        UpperCamelCase__ :List[str] = 0
        UpperCamelCase__ :Tuple = 1
        UpperCamelCase__ :Optional[Any] = 1
        UpperCamelCase__ :Union[str, Any] = False
    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node
    # define whether this is the master process / if we are in multi-node distributed mode
    UpperCamelCase__ :Optional[int] = params.node_id == 0 and params.local_rank == 0
    UpperCamelCase__ :int = params.n_nodes > 1
    # summary
    UpperCamelCase__ :Any = f"""--- Global rank: {params.global_rank} - """
    logger.info(PREFIX + """Number of nodes: %i""" % params.n_nodes )
    logger.info(PREFIX + """Node ID : %i""" % params.node_id )
    logger.info(PREFIX + """Local rank : %i""" % params.local_rank )
    logger.info(PREFIX + """World size : %i""" % params.world_size )
    logger.info(PREFIX + """GPUs per node : %i""" % params.n_gpu_per_node )
    logger.info(PREFIX + """Master : %s""" % str(params.is_master ) )
    logger.info(PREFIX + """Multi-node : %s""" % str(params.multi_node ) )
    logger.info(PREFIX + """Multi-GPU : %s""" % str(params.multi_gpu ) )
    logger.info(PREFIX + """Hostname : %s""" % socket.gethostname() )
    # set GPU device
    torch.cuda.set_device(params.local_rank )
    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("""Initializing PyTorch distributed""" )
        torch.distributed.init_process_group(
            init_method="""env://""" , backend="""nccl""" , )
def A(lowercase__):
    """
    Seed numpy and torch RNGs (and every CUDA device, when GPUs are in use)
    from ``lowercase__.seed`` so runs are reproducible.

    Fix over the original block: the body read an unbound ``args`` name while
    the parameter was ``lowercase__`` (NameError on every call); the parameter
    is now bound to the name the body was written against. The bogus def-time
    annotations (undefined typing names) are dropped.
    """
    args = lowercase__
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        # Seed all CUDA devices too for multi-GPU determinism.
        torch.cuda.manual_seed_all(args.seed)
| 45
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    """
    Build a ``SwinvaConfig`` for a timm Swin-V2 checkpoint name
    (e.g. ``swinv2_tiny_window8_256`` or
    ``swinv2_base_window12to16_192to256_22kto1k_ft``).

    Fixes over the original block (reconstructed from the reads in the body
    and the upstream swinv2 conversion script): every assignment target had
    been collapsed into a single throwaway ``lowercase`` variable, so
    ``name_split``, ``img_size`` etc. were never bound, all the ``config.*``
    attribute writes and the final ``return config`` were lost, and the
    function was named ``_snake_case`` although the conversion routine below
    calls ``get_swinva_config``.
    """
    config = SwinvaConfig()
    name_split = swinva_name.split("_")
    model_size = name_split[1]
    # Image size: "...192to256"-style fine-tuned names keep it in the last 3 chars.
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    # Window size: "window12to16" keeps the target size in the last 2 chars,
    # otherwise strip the literal "window" prefix (6 chars).
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])
    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        # "large" (or anything else).
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    if "to" in swinva_name:
        # Fine-tuned-at-higher-resolution checkpoints carry pretrained window sizes.
        config.pretrained_window_sizes = (12, 12, 12, 6)
    if ("22k" in swinva_name) and ("to" not in swinva_name):
        # Pretrained on ImageNet-22k and not fine-tuned down to 1k labels.
        num_classes = 21_841
        repo_id = """huggingface/label-files"""
        filename = """imagenet-22k-id2label.json"""
    else:
        num_classes = 1_000
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    idalabel = {int(k): v for k, v in idalabel.items()}
    # (= id2label / label2id in upstream naming.)
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size
    return config
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
if "patch_embed.proj" in name:
lowercase : str = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase : Union[str, Any] = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if "layers" in name:
lowercase : Optional[int] = """encoder.""" + name
if "attn.proj" in name:
lowercase : Dict = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
lowercase : str = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase : Optional[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase : Optional[Any] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase : Optional[int] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase : Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
lowercase : Optional[int] = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
lowercase : Any = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
lowercase : str = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
lowercase : Union[str, Any] = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if name == "norm.weight":
lowercase : Tuple = """layernorm.weight"""
if name == "norm.bias":
lowercase : Any = """layernorm.bias"""
if "head" in name:
lowercase : int = name.replace("""head""" , """classifier""" )
else:
lowercase : int = """swinv2.""" + name
return name
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
    # NOTE(review): broken as written — both parameters share one name (a
    # SyntaxError: duplicate argument), the body reads `orig_state_dict`,
    # `model`, `key`, `val`, `key_split`, `layer_num`, `block_num` and `dim`
    # that are never bound, and the renamed state-dict keys that the
    # `lowercase : ... = val[...]` lines should assign were lost in the
    # scramble. Intended behavior (confirm against the upstream swinv2
    # conversion script): pop each timm key; skip attention-mask buffers;
    # split fused `qkv` weights/biases into separate query/key/value entries
    # sized by the block's `all_head_size`; rename remaining keys for the HF
    # model; return the rewritten dict.
    for key in orig_state_dict.copy().keys():
        lowercase : Tuple = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
        if "mask" in key:
            continue
        elif "qkv" in key:
            # Key layout: layers.<layer_num>.blocks.<block_num>.attn.qkv.{weight,bias}
            lowercase : Union[str, Any] = key.split(""".""" )
            lowercase : List[str] = int(key_split[1] )
            lowercase : str = int(key_split[3] )
            lowercase : Any = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                # Fused qkv weight: first `dim` rows are query, next `dim` key, last `dim` value.
                lowercase : Tuple = val[:dim, :]
                lowercase : int = val[dim : dim * 2, :]
                lowercase : List[str] = val[-dim:, :]
            else:
                # Same split for the fused bias vector.
                lowercase : List[str] = val[:dim]
                lowercase : str = val[
                    dim : dim * 2
                ]
                lowercase : str = val[-dim:]
        else:
            lowercase : Any = val
    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    """
    Convert a timm Swin-V2 checkpoint into the Hugging Face format, verify the
    ported logits match timm's on a reference image, save the model and image
    processor, and push the result to the hub.

    Fixes over the original block: both parameters shared the name
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError), every local was assigned to a
    throwaway ``lowercase`` while later lines read the real names
    (``timm_model``, ``model``, ``image_processor``, ...), the boolean flags
    (``pretrained``, ``stream``) had been collapsed into the same placeholder,
    and the function was named ``_snake_case`` although the ``__main__`` block
    below calls ``convert_swinva_checkpoint``.
    """
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()
    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()
    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image_processor = AutoImageProcessor.from_pretrained("""microsoft/{}""".format(swinva_name.replace("""_""", """-""")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="""pt""")
    timm_outs = timm_model(inputs["""pixel_values"""])
    hf_outs = model(**inputs).logits
    # Sanity check: the HF port must reproduce timm's logits.
    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)
    print(f"Saving model {swinva_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name), organization="""nandwalritik""", commit_message="""Add model""", )
if __name__ == "__main__":
lowercase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
lowercase : Dict = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
| 336
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
_UpperCamelCase : Dict =get_tests_dir('fixtures')
_UpperCamelCase : List[str] =get_tests_dir('fixtures/dummy_feature_extractor_config.json')
_UpperCamelCase : Dict =get_tests_dir('fixtures/dummy-config.json')
class UpperCAmelCase__ ( unittest.TestCase ):
    # Tests for `AutoFeatureExtractor`: hub/local loading, error messages,
    # `trust_remote_code` handling, and custom class registration.
    # NOTE(review): scrambled — every method is named `A__` (later definitions
    # shadow earlier ones at class-creation time), most call arguments and
    # assertion operands are the unbound placeholder `A__`, and results land in
    # throwaway `_A` locals while later lines read the real names
    # (`config_dict`, `model_config`, `config`, `dict_as_saved`,
    # `feature_extractor`, `reloaded_feature_extractor`, ...). Kept
    # byte-identical; restore from the upstream auto feature-extraction tests
    # before running.
    def A__ ( self ):
        # setUp-like: presumably resets a counter — target name lost.
        _A : List[Any] = 0
    def A__ ( self ):
        # Load an extractor straight from the hub by model id.
        _A : List[Any] = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
        self.assertIsInstance(A__ ,A__ )
    def A__ ( self ):
        # Load from a local fixtures directory.
        _A : Optional[int] = AutoFeatureExtractor.from_pretrained(A__ )
        self.assertIsInstance(A__ ,A__ )
    def A__ ( self ):
        # config.json alone (without feature_extractor_type) should suffice locally.
        with tempfile.TemporaryDirectory() as tmpdirname:
            _A : int = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            _A : Any = AutoFeatureExtractor.from_pretrained(A__ ).to_dict()
            config_dict.pop('''feature_extractor_type''' )
            _A : str = WavaVecaFeatureExtractor(**A__ )
            # save in new folder
            model_config.save_pretrained(A__ )
            config.save_pretrained(A__ )
            _A : Optional[int] = AutoFeatureExtractor.from_pretrained(A__ )
            # make sure private variable is not incorrectly saved
            _A : List[Any] = json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(A__ ,A__ )
    def A__ ( self ):
        # Load from an explicit preprocessor-config file path.
        _A : Union[str, Any] = AutoFeatureExtractor.from_pretrained(A__ )
        self.assertIsInstance(A__ ,A__ )
    def A__ ( self ):
        # An invalid model identifier raises with a helpful message.
        with self.assertRaisesRegex(
            A__ ,'''bert-base is not a local folder and is not a valid model identifier''' ):
            _A : int = AutoFeatureExtractor.from_pretrained('''bert-base''' )
    def A__ ( self ):
        # An invalid revision raises.
        with self.assertRaisesRegex(
            A__ ,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            _A : Union[str, Any] = AutoFeatureExtractor.from_pretrained(A__ ,revision='''aaaaaa''' )
    def A__ ( self ):
        # A repo without preprocessor_config.json raises.
        with self.assertRaisesRegex(
            A__ ,'''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' ,):
            _A : Optional[int] = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def A__ ( self ):
        # `trust_remote_code` gating for dynamic (hub-defined) extractors.
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(A__ ):
            _A : str = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(A__ ):
            _A : Tuple = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=A__ )
        _A : Optional[int] = AutoFeatureExtractor.from_pretrained(
            '''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=A__ )
        self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(A__ )
            _A : Any = AutoFeatureExtractor.from_pretrained(A__ ,trust_remote_code=A__ )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
    def A__ ( self ):
        # Registering a custom config/extractor pair with the auto classes.
        try:
            AutoConfig.register('''custom''' ,A__ )
            AutoFeatureExtractor.register(A__ ,A__ )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(A__ ):
                AutoFeatureExtractor.register(A__ ,A__ )
            # Now that the config is registered, it can be used as any other config with the auto-API
            _A : Dict = CustomFeatureExtractor.from_pretrained(A__ )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(A__ )
                _A : Optional[Any] = AutoFeatureExtractor.from_pretrained(A__ )
                self.assertIsInstance(A__ ,A__ )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def A__ ( self ):
        # Local-vs-remote resolution when a registered class and a hub class collide.
        class UpperCAmelCase__ ( __snake_case ):
            __snake_case : Dict = True
        try:
            AutoConfig.register('''custom''' ,A__ )
            AutoFeatureExtractor.register(A__ ,A__ )
            # If remote code is not set, the default is to use local
            _A : Tuple = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' )
            self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            _A : Optional[Any] = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=A__ )
            self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            _A : Any = AutoFeatureExtractor.from_pretrained(
                '''hf-internal-testing/test_dynamic_feature_extractor''' ,trust_remote_code=A__ )
            self.assertEqual(feature_extractor.__class__.__name__ ,'''NewFeatureExtractor''' )
            self.assertTrue(not hasattr(A__ ,'''is_local''' ) )
        finally:
            # Always undo the registration so other tests see a clean mapping.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 720
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
_UpperCamelCase : Union[str, Any] =logging.get_logger(__name__) # pylint: disable=invalid-name
_UpperCamelCase : List[Any] =256
class UpperCAmelCase__ ( __snake_case ):
    # Spectrogram-diffusion pipeline: encodes note tokens and continuous
    # (previous-mel) context, iteratively denoises a spectrogram chunk with a
    # film-conditioned decoder + DDPM scheduler, and optionally vocodes the
    # result to audio with MelGAN.
    # NOTE(review): scrambled — `__init__` declares five parameters all named
    # `A__` (a SyntaxError: duplicate argument names), every method below is
    # also named `A__` (later defs shadow earlier ones), results are assigned
    # to a throwaway `_A` while later lines read the real names (`features`,
    # `timesteps`, `logits`, `mel`, `pred_mel`, `full_pred_mel`, ...), and the
    # `__call__` body invokes `self.scale_features` / `self.encode` /
    # `self.decode` / `self.scale_to_features`, which no longer exist under
    # those names. Kept byte-identical; restore from the upstream
    # `SpectrogramDiffusionPipeline` before running.
    __snake_case : Tuple = ["melgan"]
    def __init__( self ,A__ ,A__ ,A__ ,A__ ,A__ ,):
        # Parameters were presumably: notes_encoder, continuous_encoder,
        # decoder, scheduler, melgan (see register_modules below) — confirm.
        super().__init__()
        # From MELGAN
        _A : Any = math.log(1E-5 ) # Matches MelGAN training.
        _A : int = 4.0 # Largest value for most examples
        _A : int = 128
        self.register_modules(
            notes_encoder=A__ ,continuous_encoder=A__ ,decoder=A__ ,scheduler=A__ ,melgan=A__ ,)
    def A__ ( self ,A__ ,A__=(-1.0, 1.0) ,A__=False ):
        # Linearly rescale features from [min_value, max_value] to `output_range`.
        _A , _A : int = output_range
        if clip:
            _A : int = torch.clip(A__ ,self.min_value ,self.max_value )
        # Scale to [0, 1].
        _A : Optional[Any] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def A__ ( self ,A__ ,A__=(-1.0, 1.0) ,A__=False ):
        # Inverse of the scaling above: map network outputs back to feature range.
        _A , _A : Dict = input_range
        _A : Tuple = torch.clip(A__ ,A__ ,A__ ) if clip else outputs
        # Scale to [0, 1].
        _A : Any = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def A__ ( self ,A__ ,A__ ,A__ ):
        # Encode note tokens (mask = nonzero tokens) and continuous inputs.
        _A : Tuple = input_tokens > 0
        _A , _A : str = self.notes_encoder(
            encoder_input_tokens=A__ ,encoder_inputs_mask=A__ )
        _A , _A : List[str] = self.continuous_encoder(
            encoder_inputs=A__ ,encoder_inputs_mask=A__ )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def A__ ( self ,A__ ,A__ ,A__ ):
        # One denoising step: run the decoder at the given (broadcast) timestep.
        _A : str = noise_time
        if not torch.is_tensor(A__ ):
            _A : Any = torch.tensor([timesteps] ,dtype=torch.long ,device=input_tokens.device )
        elif torch.is_tensor(A__ ) and len(timesteps.shape ) == 0:
            _A : Union[str, Any] = timesteps[None].to(input_tokens.device )
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        _A : int = timesteps * torch.ones(input_tokens.shape[0] ,dtype=timesteps.dtype ,device=timesteps.device )
        _A : Dict = self.decoder(
            encodings_and_masks=A__ ,decoder_input_tokens=A__ ,decoder_noise_time=A__ )
        return logits
    @torch.no_grad()
    def __call__( self ,A__ ,A__ = None ,A__ = 100 ,A__ = True ,A__ = "numpy" ,A__ = None ,A__ = 1 ,):
        # Generate audio chunk-by-chunk, feeding each predicted mel back in as
        # context for the next chunk.
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(A__ ,A__ ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(A__ )}.""" )
        _A : Any = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] ,dtype=np.floataa )
        _A : Optional[int] = np.zeros([1, 0, self.n_dims] ,np.floataa )
        _A : Dict = torch.ones((1, TARGET_FEATURE_LENGTH) ,dtype=A__ ,device=self.device )
        for i, encoder_input_tokens in enumerate(A__ ):
            if i == 0:
                _A : str = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device ,dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                _A : Dict = torch.zeros((1, TARGET_FEATURE_LENGTH) ,dtype=A__ ,device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                _A : Optional[int] = ones
            _A : Tuple = self.scale_features(
                A__ ,output_range=[-1.0, 1.0] ,clip=A__ )
            _A : Tuple = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) ,continuous_inputs=A__ ,continuous_mask=A__ ,)
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            _A : Any = randn_tensor(
                shape=encoder_continuous_inputs.shape ,generator=A__ ,device=self.device ,dtype=self.decoder.dtype ,)
            # set step values
            self.scheduler.set_timesteps(A__ )
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                _A : Union[str, Any] = self.decode(
                    encodings_and_masks=A__ ,input_tokens=A__ ,noise_time=t / self.scheduler.config.num_train_timesteps ,)
                # Compute previous output: x_t -> x_t-1
                _A : List[str] = self.scheduler.step(A__ ,A__ ,A__ ,generator=A__ ).prev_sample
            _A : Union[str, Any] = self.scale_to_features(A__ ,input_range=[-1.0, 1.0] )
            _A : Optional[Any] = mel[:1]
            _A : int = mel.cpu().float().numpy()
            _A : Optional[Any] = np.concatenate([full_pred_mel, pred_mel[:1]] ,axis=1 )
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(A__ ,A__ )
            logger.info('''Generated segment''' ,A__ )
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
        if output_type == "numpy":
            # Vocode the full predicted mel to a waveform.
            _A : Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            _A : Dict = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=A__ )
| 332
| 0
|
"""simple docstring"""
import cva
import numpy as np
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self , _lowercase , _lowercase ) -> Optional[Any]:
if k in (0.04, 0.06):
_lowerCamelCase : List[Any] = k
_lowerCamelCase : str = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ) -> str:
return str(self.k )
def a__ ( self , _lowercase ) -> tuple[cva.Mat, list[list[int]]]:
_lowerCamelCase : str = cva.imread(SCREAMING_SNAKE_CASE__ , 0 )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = img.shape
_lowerCamelCase : Dict = []
_lowerCamelCase : int = img.copy()
_lowerCamelCase : int = cva.cvtColor(SCREAMING_SNAKE_CASE__ , cva.COLOR_GRAY2RGB )
_lowerCamelCase, _lowerCamelCase : List[Any] = np.gradient(SCREAMING_SNAKE_CASE__ )
_lowerCamelCase : List[str] = dx**2
_lowerCamelCase : int = dy**2
_lowerCamelCase : Union[str, Any] = dx * dy
_lowerCamelCase : Dict = 0.04
_lowerCamelCase : int = self.window_size // 2
for y in range(SCREAMING_SNAKE_CASE__ , h - offset ):
for x in range(SCREAMING_SNAKE_CASE__ , w - offset ):
_lowerCamelCase : int = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCamelCase : Dict = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCamelCase : List[Any] = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
_lowerCamelCase : str = (wxx * wyy) - (wxy**2)
_lowerCamelCase : Union[str, Any] = wxx + wyy
_lowerCamelCase : Optional[int] = det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Tuple =HarrisCorner(0.04, 3)
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] =edge_detect.detect('path_to_image')
cva.imwrite('detect.png', color_img)
| 434
|
from __future__ import annotations

# Sentinel key marking the end of a complete word inside the trie.
END = '#'


class Trie:
    """Prefix tree storing words character by character; `END` marks word ends."""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        """Insert `text` into the trie, creating nodes as needed."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        """Return all suffixes completing `prefix` (empty list if no match)."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        """Recursively collect every suffix below node `d`; END contributes " "."""
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ('depart', 'detergent', 'daring', 'dog', 'deer', 'deal')
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """Return every word in `words` starting with `string` (trailing space kept)."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 61
| 0
|
'''simple docstring'''
def UpperCAmelCase(a, b):
    """Return the bitwise XOR of two non-negative integers as a '0b'-prefixed
    binary string, zero-padded to the width of the longer operand.

    Raises:
        ValueError: if either input is negative.
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    # Pad both operands to the same width, then XOR digit by digit.
    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 715
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCamelCase_(TaskTemplate):
    """Extractive question-answering task template: maps dataset columns onto
    the canonical (question, context, answers) schema."""

    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        # Maps the configured column names onto the canonical schema names.
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 17
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class lowercase_(unittest.TestCase):
    """Tests for `AlignProcessor` (BERT tokenizer + EfficientNet image processor):
    save/load round-trips and delegation of text/image preprocessing."""

    def setUp(self):
        # Build a throwaway directory holding a minimal BERT vocab and an
        # EfficientNet image-processor config so processors can round-trip via disk.
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
            "image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a single random RGB PIL image (channels moved to last axis)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)

    def test_image_processor(self):
        # Processor must delegate image preprocessing to the image processor.
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        # Processor must delegate text preprocessing to the tokenizer
        # (ALIGN pads to max_length=64).
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 670
|
def snake_case(grid) -> int:
    """Return the minimum path sum from top-left to bottom-right of `grid`,
    moving only right or down. NOTE: mutates `grid` in place (dynamic
    programming over the rows themselves).

    Raises:
        TypeError: if the grid is empty or has no columns.
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    # First row: only reachable by moving right, so accumulate prefix sums.
    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]
    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]
    return grid[-1][-1]


def fill_row(current_row, row_above) -> list:
    """Fill `current_row` with cumulative minimum path sums given the row above."""
    # First cell: only reachable from directly above.
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 670
| 1
|
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class _lowercase(Trainer):
    """Question-answering `Trainer` with post-processing-based metric computation,
    pytorch-quantization calibration support, and ONNX export."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples

    def get_calib_dataloader(self, calib_dataset=None):
        """Build a deterministic (non-shuffled) DataLoader for calibration."""
        # NOTE(review): `self.calib_dataset` is never assigned in __init__ —
        # presumably set by the caller before calibration; confirm upstream.
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires an calib_dataset.')
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset

        calib_dataset = self._remove_unused_columns(calib_dataset, description='Calibration')

        return DataLoader(
            calib_dataset,
            batch_size=self.args.eval_batch_size,
            collate_fn=self.data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            shuffle=False,
        )

    def calibrate(self, calib_dataset=None):
        """Run `calib_num` samples through the model to collect quantization ranges."""
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset)

        model = self.model
        quant_trainer.configure_model(model, self.quant_trainer_args, calib=True)
        model.eval()
        quant_trainer.enable_calibration(model)

        logger.info('***** Running calibration *****')
        logger.info(f" Num examples = {self.calib_num}")
        logger.info(f" Batch size = {calib_dataloader.batch_size}")

        for step, inputs in enumerate(calib_dataloader):
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only=False)
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break

        quant_trainer.finish_calibration(model, self.quant_trainer_args)
        self.model = model

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        """Run the evaluation loop, then compute QA metrics via `post_process_function`."""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description='Evaluation',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            # Always restore the metric fn, even if the loop raises.
            self.compute_metrics = compute_metrics

        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

            self.log(metrics)
        else:
            metrics = {}

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        """Run prediction; post-process and compute metrics when both hooks are set."""
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description='Prediction',
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
            )
        finally:
            self.compute_metrics = compute_metrics

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)

    def save_onnx(self, output_dir="./"):
        """Export the (quantized) model to `output_dir`/model.onnx using one eval batch."""
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        batch = next(iter(eval_dataloader))

        # saving device - to make it consistent
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        # convert to tuple
        input_tuple = tuple(v.to(device) for k, v in batch.items())

        logger.info('Converting model to be onnx compatible')
        from pytorch_quantization.nn import TensorQuantizer

        TensorQuantizer.use_fb_fake_quant = True

        model = self.model.to(device)
        model.eval()
        model.float()

        model_to_save = model.module if hasattr(model, 'module') else model
        quant_trainer.configure_model(model_to_save, self.quant_trainer_args)

        output_model_file = os.path.join(output_dir, 'model.onnx')
        logger.info(f"exporting model to {output_model_file}")

        axes = {0: 'batch_size', 1: 'seq_len'}
        torch.onnx.export(
            model_to_save,
            input_tuple,
            output_model_file,
            export_params=True,
            opset_version=13,
            do_constant_folding=True,
            input_names=['input_ids', 'attention_mask', 'token_type_ids'],
            output_names=['output_start_logits', 'output_end_logits'],
            dynamic_axes={
                'input_ids': axes,
                'attention_mask': axes,
                'token_type_ids': axes,
                'output_start_logits': axes,
                'output_end_logits': axes,
            },
            verbose=True,
        )
        logger.info('onnx export finished')
| 133
|
"""simple docstring"""
from copy import deepcopy
class _lowercase :
def __init__( self , UpperCAmelCase_ = None , UpperCAmelCase_ = None ) -> None:
if arr is None and size is not None:
lowerCamelCase : Any = size
lowerCamelCase : Optional[int] = [0] * size
elif arr is not None:
self.init(UpperCAmelCase_ )
else:
raise ValueError('Either arr or size must be specified' )
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> None:
lowerCamelCase : Optional[int] = len(UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = deepcopy(UpperCAmelCase_ )
for i in range(1 , self.size ):
lowerCamelCase : Union[str, Any] = self.next_(UpperCAmelCase_ )
if j < self.size:
self.tree[j] += self.tree[i]
def _UpperCamelCase ( self ) -> list[int]:
lowerCamelCase : Dict = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
lowerCamelCase : List[str] = self.next_(UpperCAmelCase_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def _UpperCamelCase ( UpperCAmelCase_ ) -> int:
return index + (index & (-index))
@staticmethod
def _UpperCamelCase ( UpperCAmelCase_ ) -> int:
return index - (index & (-index))
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ ) -> None:
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowerCamelCase : Union[str, Any] = self.next_(UpperCAmelCase_ )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ ) -> None:
self.add(UpperCAmelCase_ , value - self.get(UpperCAmelCase_ ) )
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> int:
if right == 0:
return 0
lowerCamelCase : Dict = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowerCamelCase : Optional[int] = self.prev(UpperCAmelCase_ )
return result
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ ) -> int:
return self.prefix(UpperCAmelCase_ ) - self.prefix(UpperCAmelCase_ )
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> int:
return self.query(UpperCAmelCase_ , index + 1 )
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> int:
value -= self.tree[0]
if value < 0:
return -1
lowerCamelCase : str = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowerCamelCase : str = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
| 133
| 1
|
def snake_case(n=1_000) -> int:
    """For a perimeter `n`, return the largest product a*b*c over Pythagorean
    triples with a + b + c == n, or -1 if no such triple exists
    (Project Euler problem 9 for n = 1000)."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{snake_case() = }")
| 80
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> list of public symbols so that
# optional backends (tokenizers / torch / tf / flax) are only imported on demand.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports so static type checkers see the real symbols.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668
| 0
|
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
# Sampling hyper-parameters for the value-guided diffusion planner.
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment (old gym API: obs, reward, done, info)
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 705
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
a__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a__ : List[str] = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    """Map a requested (height, width) to the nearest size compatible with the
    movq latent grid: divide by scale_factor**2 (rounding up), then multiply
    back by scale_factor."""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up so the latent grid fully covers the requested size.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class KandinskyV22ControlnetPipeline(DiffusionPipeline):
    """Kandinsky 2.2 ControlNet image-generation pipeline (decoder stage).

    NOTE(review): reconstructed from a mangled source in which every parameter and
    local shared one identifier (a SyntaxError) and `self.` assignment targets were
    lost. Names were restored from the visible call sites (`self.prepare_latents`,
    `self._execution_device`, `self.movq_scale_factor`) and the diffusers API —
    confirm against upstream before shipping.
    """

    def __init__(self, unet, scheduler, movq):
        # unet: conditional denoising UNet; scheduler: DDPM-style scheduler;
        # movq: VQ autoencoder used to decode latents into images.
        super().__init__()

        self.register_modules(
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Each movq down block halves spatial resolution, so latents are smaller
        # than the output image by this factor (used by downscale_height_and_width).
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw initial noise (or validate user-supplied latents) and scale by the scheduler's init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload unet and movq to CPU, moving each to GPU only while its forward runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole sub-models to CPU, moving each to GPU just before use.

        Faster than sequential offload at the cost of higher peak memory.
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device on which the pipeline's models run (accounts for accelerate hooks)."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)  # NOTE(review): constant not visible in this chunk — confirm it exists above
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        """Generate images from image embeddings plus a ControlNet `hint` tensor."""
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0

        # Accept lists of per-prompt embeddings/hints and stack them on the batch axis.
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            # Unconditional half first, conditional half second (standard CFG layout).
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device
            )
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"image_embeds": image_embeds, "hint": hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,  # Kandinsky 2.2 conditions via added_cond_kwargs only
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # UNet predicts noise and variance stacked on the channel axis.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume the variance channels — drop them.
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing: decode latents with the movq autoencoder
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 570
| 0
|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Objects importable without optional dependencies: configuration only.
# (The mangled source bound this dict to `_a` while `_LazyModule` below
# referenced the undefined name `_import_structure` — a NameError on import.)
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require torch; register them only when it is installed.
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Direct imports for static type checkers; at runtime the lazy module is used instead.
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 162
|
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of `nums` (elements kept in input order) that sums to `max_sum`.

    The mangled source defined both functions under one name with duplicate
    parameters (a SyntaxError) while the call sites used the real names below.
    """
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    """Depth-first expansion of the subset state-space, pruning branches that
    already exceed `max_sum` or can no longer reach it with the remaining numbers."""
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


if __name__ == "__main__":
    nums = [3, 34, 4, 12, 5, 2]
    max_sum = 9
    result = generate_sum_of_subsets_soln(nums, max_sum)
    print(*result)
| 162
| 1
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Turn a test-file path (`tests/models/.../test_modeling_X.py`) into a dotted module path.

    Raises ValueError if the path does not follow the expected layout.
    (Restored name: the mangled source called this `_a` while callers use `get_module_path`.)
    """
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path
def get_test_module(test_file):
    """Import and return the test module corresponding to `test_file`."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module
def get_tester_classes(test_file):
    """Collect all `*ModelTester` classes defined in the module for `test_file`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names (the mangled lambda's parameter did not match its body — a NameError)
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_classes(test_file):
    """Collect test classes in `test_file`'s module that actually exercise models."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        test_class = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(test_class, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(test_class)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)
def get_model_classes(test_file):
    """Union of all model classes exercised by the test classes in `test_file`."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)
def get_model_tester_from_test_class(test_class):
    """Instantiate `test_class`, run its `setUp`, and return the class of its
    `model_tester` attribute (or None when absent/unset)."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester
def get_test_classes_for_model(test_file, model_class):
    """Test classes in `test_file` whose `all_model_classes` include `model_class`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)
def get_tester_classes_for_model(test_file, model_class):
    """Model-tester classes attached to the test classes that exercise `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)
def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model-tester class (or None)."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping
def get_model_to_test_mapping(test_file):
    """Map each model class to the list of test classes that exercise it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping
def get_model_to_tester_mapping(test_file):
    """Map each model class to the list of model-tester classes attached to its tests."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping
def to_json(o):
    """Recursively convert classes to their names so mappings are JSON-serializable.

    Fixes the mangled `isinstance(o, o)` calls, which raised TypeError for any
    non-type argument: the second operand must be a type or tuple of types.
    """
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        # Leave ints, floats, None, etc. untouched.
        return o
| 720
|
from typing import TYPE_CHECKING

from ....utils import _LazyModule


# Mapping of submodules to their public names. (The mangled source bound this
# to a throwaway name while `_LazyModule` referenced the undefined
# `_import_structure` — a NameError on import.)
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer imports on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 552
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of `VQModel.encode` (class name restored from the reference at the
    `encode` return site; the mangled source shadowed it with a duplicate name)."""

    # Pre-quantization encoder activations, shape (batch, latent_channels, h, w).
    latents: torch.FloatTensor
class VQModel(ModelMixin, ConfigMixin):
    """VQ-VAE autoencoder for encoding images to and decoding images from latents.

    NOTE(review): reconstructed — the mangled source gave every __init__ parameter
    one shared name (a SyntaxError), dropped all `self.` assignment targets, and
    spelled `nn.Conv2d` as `nn.Convad`. Parameter names/defaults follow the
    diffusers `VQModel`; confirm against upstream.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        # Fall back to the latent width when no explicit codebook dim is given.
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        """Encode `x` to pre-quantization latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        """Quantize latents (unless `force_not_quantize`) and decode them to an image."""
        # also go through quantization layer
        if not force_not_quantize:
            quant, _emb_loss, _info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        # The "spatial" norm variant additionally conditions the decoder on the quantized map.
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        """Full autoencode: encode then decode `sample`."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 675
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Read a `pyspark.sql.DataFrame` into a `datasets` Dataset via the Spark builder.

    NOTE(review): parameter names reconstructed from the keyword arguments the
    original body passes to `super().__init__` / `Spark(...)` and from the
    attribute reads in `read()`; the mangled source gave every parameter one
    shared name (a SyntaxError).
    """

    def __init__(
        self,
        df,
        split=None,
        features=None,
        streaming=True,
        cache_dir=None,
        keep_in_memory=False,
        working_dir=None,
        load_from_cache_file=True,
        file_format="arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        """Return the dataset; streaming mode avoids materializing it to disk."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        # Force a rebuild when the caller opted out of the cache.
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 675
| 1
|
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    # NOTE: the mangled source read `args.txtaimg_unclip`, which does not match
    # the declared `--txt2img_unclip` option (AttributeError), and clobbered
    # every intermediate in one variable.
    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    # Reuse the txt2img components, swapping in the CLIP image encoder for variations.
    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 714
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file,
    eval_file,
    test_file,
    tokenizer,
    label_column_id,
    max_seq_length=None,
):
    """Load CSV files with `datasets`, tokenize, and convert each split to tf.data.

    Returns (train_ds, val_ds, test_ds, label2id).

    NOTE(review): reconstructed — the mangled source bound every local to one
    name (destroying the `files[...]` writes) and spelled the tf integer dtypes
    as `tf.intaa`; restored here as int32 features / int64 labels — verify.
    """
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    # The label column is removed from the feature columns and tracked separately.
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        # Single-sentence task.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        # Sentence-pair task.
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
# Module-level logger; the body of main() references the name `logger`,
# but the mangled source bound the logger to a throwaway variable.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments describing the data used for training and evaluation.

    NOTE(review): class and field names reconstructed from their uses in main()
    (`data_args.train_file`, `.dev_file`, `.test_file`, `.label_column_id`,
    `.max_seq_length`); the mangled source used one shared field name and the
    undefined default `A_` (a NameError) instead of `None`.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class ModelArguments:
    """Arguments describing which model/config/tokenizer to fine-tune from.

    NOTE(review): class and field names reconstructed from their uses in main()
    (`model_args.model_name_or_path`, `.config_name`, `.tokenizer_name`,
    `.cache_dir`); the mangled source used the undefined default `A_`.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    """Fine-tune a TF sequence-classification model on CSV data and optionally evaluate.

    NOTE(review): reconstructed — the mangled source named this function
    differently from the `main()` call in the __main__ guard, bound all locals to
    one name, and spelled `fp16` as `fpaa` and `label2id`/`id2label` as
    `labelaid`/`idalabel`.
    """
    # Parse CLI args into the three argument dataclasses.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        # Simple accuracy over argmax predictions.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f" {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results


if __name__ == "__main__":
    main()
| 294
| 0
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
# Filename under which scheduler configs are saved/loaded. The class body below
# references this as SCHEDULER_CONFIG_NAME, but the mangled source bound the
# string to a throwaway name — a NameError at class creation.
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    """Enumeration of the Flax schedulers usable with the Karras-style pipelines.

    NOTE(review): member names were destroyed by the source mangling and are
    restored from the diffusers upstream; the values 1..5 are preserved from
    the source. The undefined base class was restored to `Enum`, which the file
    imports.
    """

    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base output class for a scheduler's `step` result.

    NOTE(review): class/field names restored from the diffusers upstream —
    the mangled source reduced both to single letters.
    """

    # Computed sample (x_{t-1}) for the previous timestep; same shape as the input sample.
    prev_sample: jnp.ndarray
class FlaxSchedulerMixin:
    """Mixin providing save/load and compatibility helpers for the Flax schedulers.

    NOTE(review): class/method/attribute names restored from the visible call
    sites (`cls._compatibles`, `self._get_compatibles()`) and the diffusers API;
    the `return_unused_kwargs=True` arguments inside `from_pretrained` are
    reconstructed — confirm against upstream.
    """

    # File name used by save_config / load_config.
    config_name = SCHEDULER_CONFIG_NAME
    # Config fields that should not be persisted.
    ignore_for_config = ["dtype"]
    # Names of scheduler classes this one is interchangeable with.
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        """Instantiate a scheduler (and its state, when it has one) from a saved config."""
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        """Write this scheduler's config to `save_directory`."""
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Scheduler classes that can be swapped in for this one."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        # Resolve compatible class names against the top-level package namespace.
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    """Broadcast `x` to `shape`, aligning `x`'s axes with the LEADING axes of `shape`.

    Trailing singleton axes are appended to `x` before broadcasting, e.g. a (B,)
    vector against shape (B, C, H, W) becomes (B, 1, 1, 1) first.
    (Restored name: the mangled source called this `a` while the callers below
    use `broadcast_to_shape_from_left`.)
    """
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
def a ( snake_case__: int , snake_case__: Tuple=0.9_9_9 , snake_case__: Dict=jnp.floataa ):
'''simple docstring'''
def alpha_bar(snake_case__: Dict ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
lowercase_ = []
for i in range(snake_case__ ):
lowercase_ = i / num_diffusion_timesteps
lowercase_ = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(snake_case__ ) / alpha_bar(snake_case__ ) , snake_case__ ) )
return jnp.array(snake_case__ , dtype=snake_case__ )
@flax.struct.dataclass
class CommonSchedulerState:
    """Precomputed beta/alpha tables shared by the Flax schedulers.

    NOTE(review): class name restored from the type annotations at the helper
    functions below; field names restored from the keyword arguments of the
    `cls(...)` call at the end of `create`.
    """

    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        """Build the state for `scheduler` according to its configured beta schedule."""
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    """Gather sqrt(alpha_bar_t) and sqrt(1 - alpha_bar_t) per timestep, broadcast
    to the sample shape.

    `noise` is accepted (and unused) so the signature matches the common-helper
    call sites below. (Restored name: the mangled source called this `a` while
    the callers use `get_sqrt_alpha_prod`.)
    """
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    """Forward-diffuse: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
def get_velocity_common(state, sample, noise, timesteps):
    """v-prediction target: v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_t."""
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
| 97
|
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    """Map one original-MAE checkpoint parameter name to its HF ViTMAE equivalent.

    Order of the substring checks matters: e.g. `decoder_blocks` must be handled
    before the bare `blocks` check. (Restored name: the mangled source defined
    both conversion helpers as `a`, so the second clobbered the first.)
    """
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def a(orig_state_dict, config):
    """Convert an original MAE checkpoint state dict to the HF ViTMAE layout.

    Fused ``qkv`` projection tensors are split into separate query/key/value
    entries; every other key is carried over unchanged.

    Args:
        orig_state_dict (dict): checkpoint state dict; modified in place.
        config: model config providing ``hidden_size`` (encoder) and
            ``decoder_hidden_size`` (decoder).

    Returns:
        dict: the converted state dict (the same object that was passed in).
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split('.')
            layer_num = int(key_split[1])
            # Encoder and decoder layers use different widths and name prefixes.
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = 'decoder.decoder_layers.'
            else:
                dim = config.hidden_size
                prefix = 'vit.encoder.layer.'
            # The fused tensor is laid out as [query; key; value] along dim 0.
            if "weight" in key:
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.weight'] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.query.bias'] = val[:dim]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.key.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}{layer_num}.attention.attention.value.bias'] = val[-dim:]
        else:
            orig_state_dict[key] = val
    return orig_state_dict
def a(checkpoint_url, pytorch_dump_folder_path):
    """Download an original ViTMAE checkpoint, convert it to the HF layout,
    verify a logits slice, and save model + image processor.

    Args:
        checkpoint_url (str): URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path (str): output directory for the converted model.
    """
    config = ViTMAEConfig()
    # Architecture size is inferred from the checkpoint URL.
    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1_280
        config.intermediate_size = 5_120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = 'https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='pt')

    # forward pass (seeded: ViTMAE masking is random)
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    # Reference slices recorded from the original implementation.
    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]])
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]])
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]])

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse checkpoint URL / output dir and run the conversion.
    # NOTE(review): the parser is bound to `__a` but used as `parser`, and the
    # parsed namespace is bound to `__a` but used as `args` — names look mangled; verify.
    __a = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    __a = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 97
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {}
class A__ ( __magic_name__ ):
    """Configuration for a LLaMA-style decoder model.

    Stores vocabulary/architecture sizes, RoPE-scaling options, and special
    token ids, and validates the ``rope_scaling`` dictionary at construction.
    """

    # NOTE(review): both class attributes below share the name `lowercase`, so the
    # second shadows the first — they look like a mangled `model_type` /
    # `keys_to_ignore_at_inference` pair; verify against the original config.
    lowercase = 'llama'
    lowercase = ['past_key_values']

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        """Store the hyper-parameters and validate ``rope_scaling``.

        ``num_key_value_heads`` defaults to ``num_attention_heads`` for
        backward compatibility (i.e. plain multi-head attention, no GQA).
        """
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate ``rope_scaling``: None, or ``{"type": "linear"|"dynamic", "factor": float > 1}``.

        Raises:
            ValueError: if the dictionary shape, type, or factor is invalid.
        """
        if self.rope_scaling is None:
            return
        # The message previously referred to a `name` field although the code
        # reads the `type` key — the message is fixed to match the check.
        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                f'got {self.rope_scaling}')
        rope_scaling_type = self.rope_scaling.get('type', None)
        rope_scaling_factor = self.rope_scaling.get('factor', None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 69
|
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( __magic_name__ ):
    """Dataset over CNN/DailyMail-style story files stored in one directory.

    Each item is one ``.story`` file split into (file name, story lines,
    summary lines). Files whose name contains "summary" are skipped.
    """

    def __init__(self, path="", prefix="train"):
        """Index every regular story file under ``path``.

        Args:
            path (str): directory containing the story files; must exist.
            prefix (str): split name, kept for API compatibility (unused here).
        """
        assert os.path.isdir(path)

        # Bug fix: the document index must be an instance attribute, otherwise
        # the appends below fail with AttributeError.
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Return the number of indexed story files."""
        return len(self.documents)

    def __getitem__(self, idx):
        """Read story ``idx`` and return ``(document_name, story_lines, summary_lines)``."""
        document_path = self.documents[idx]
        document_name = document_path.split('/')[-1]
        with open(document_path, encoding='utf-8') as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def lowerCAmelCase__(raw_story):
    """Split a raw CNN/DailyMail story string into story and summary lines.

    Lines appearing after the first ``@highlight`` marker are summary lines;
    the markers themselves are dropped. A terminal period is appended to any
    line lacking end punctuation.

    Args:
        raw_story (str): full text of one story file.

    Returns:
        tuple[list[str], list[str]]: ``(story_lines, summary_lines)``. When no
        ``@highlight`` marker exists, all lines are story lines and the
        summary is empty.
    """
    end_tokens = ['.', '!', '?', '...', "'", '`', '"', '\u2019', ')']

    def _add_missing_period(line):
        # "@highlight" markers pass through verbatim; other lines get a '.'
        # appended unless they already end with a sentence terminator.
        if line.startswith('@highlight'):
            return line
        if line[-1] in end_tokens:
            return line
        return line + '.'

    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split('\n')]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith('@highlight'):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines (drop any further "@highlight" markers)
    summary_lines = list(filter(lambda t: not t.startswith('@highlight'), lines))
    return story_lines, summary_lines
def lowerCAmelCase__(line):
    """Return ``line`` with a terminal '.' appended when it lacks end punctuation.

    ``@highlight`` marker lines are returned unchanged. ``line`` must be
    non-empty: the last-character check raises IndexError on an empty string.
    """
    # '...' can never match a single character and '\u2019' is duplicated — the
    # list is kept as-is since only single-character membership matters.
    end_tokens = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
    if line.startswith('@highlight'):
        return line
    if line[-1] in end_tokens:
        return line
    return line + "."
def lowerCAmelCase__(sequence, block_size, pad_token_id):
    """Truncate ``sequence`` to ``block_size`` or right-pad it with ``pad_token_id``.

    Padding mutates ``sequence`` in place; truncation returns a new list.

    Args:
        sequence (list[int]): token ids.
        block_size (int): target length.
        pad_token_id (int): id used for padding.

    Returns:
        list[int]: a sequence of exactly ``block_size`` tokens.
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def lowerCAmelCase__(sequence, pad_token_id):
    """Build an attention mask for ``sequence``: 1 for real tokens, 0 for padding.

    Args:
        sequence (torch.Tensor): token ids.
        pad_token_id (int): id marking padding positions.

    Returns:
        torch.Tensor: mask with the same shape as ``sequence``.
    """
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def lowerCAmelCase__(story_lines, summary_lines, tokenizer):
    """Tokenize story and summary lines and flatten each into one id list.

    Args:
        story_lines (list[str]): article sentences.
        summary_lines (list[str]): summary sentences.
        tokenizer: object exposing ``encode(str) -> list[int]``.

    Returns:
        tuple[list[int], list[int]]: flattened (story ids, summary ids).
    """
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def lowerCAmelCase__(batch, separator_token_id):
    """Compute alternating 0/1 token-type ids per sentence for a token batch.

    The segment id flips every time a separator token is encountered; tokens
    before the first separator get segment 1 (since the counter starts at -1,
    -1 % 2 == 1).

    Args:
        batch: iterable of token-id sequences.
        separator_token_id (int): id that delimits sentences.

    Returns:
        torch.Tensor: segment ids with the same shape as ``batch``.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
| 69
| 1
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowercase_ = logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
    """Deprecated alias kept for backward compatibility.

    Use ``MobileViTImageProcessor`` instead; this subclass only emits a
    deprecation warning before delegating to the parent constructor.
    """

    def __init__(self, *args, **kwargs) -> None:
        """Warn about the deprecation, then forward all arguments to the parent."""
        warnings.warn(
            'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use MobileViTImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 154
|
def lowerCAmelCase(p) -> bool:
    """Lucas–Lehmer primality test for the Mersenne number 2**p - 1.

    Args:
        p (int): exponent, must be >= 2.

    Returns:
        bool: True if 2**p - 1 is prime.

    Raises:
        ValueError: if ``p`` < 2.
    """
    if p < 2:
        raise ValueError('p should not be less than 2!')
    elif p == 2:
        # 2**2 - 1 = 3 is prime; the recurrence below requires p > 2.
        return True
    s = 4
    m = (1 << p) - 1
    # s_{k+1} = (s_k^2 - 2) mod M_p; M_p is prime iff the final s is 0.
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
    # Demo: 2**7 - 1 = 127 is prime (True); 2**11 - 1 = 2047 = 23 * 89 (False).
    # NOTE(review): `lucas_lehmer_test` is not defined under this name above — verify.
    print(lucas_lehmer_test(7))
    print(lucas_lehmer_test(11))
| 154
| 1
|
from __future__ import annotations
import math
from collections.abc import Callable
def UpperCAmelCase_ ( _A , _A , _A , _A = 1_00 , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = x_start
SCREAMING_SNAKE_CASE__ = fnc(_A )
SCREAMING_SNAKE_CASE__ = 0.0
for _ in range(_A ):
# Approximates curve as a sequence of linear lines and sums their length
SCREAMING_SNAKE_CASE__ = (x_end - x_start) / steps + xa
SCREAMING_SNAKE_CASE__ = fnc(_A )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
SCREAMING_SNAKE_CASE__ = xa
SCREAMING_SNAKE_CASE__ = fxa
return length
if __name__ == "__main__":
    def UpperCAmelCase_ ( _A ):
        '''Demo integrand: f(x) = sin(10 * x).'''
        # NOTE(review): the body references `x` but the parameter is `_A` — verify.
        return math.sin(10 * x )
    # Print the arc length of sin(10x) on [-10, 10] with increasing step counts.
    print('''f(x) = sin(10 * x)''')
    print('''The length of the curve from x = -10 to x = 10 is:''')
    # NOTE(review): the counter is bound to `_SCREAMING_SNAKE_CASE` but the loop
    # uses `i`, and `line_length(f, ...)` references an undefined `f` — verify.
    _SCREAMING_SNAKE_CASE : Optional[int] = 10
    while i <= 100000:
        print(F"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
| 713
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase__ :
    """Training arguments for the CodeParrot training script: model/dataset paths,
    batch sizes, optimizer and learning-rate-schedule hyper-parameters."""

    # NOTE(review): every field below is bound to the same name `a` (and some
    # defaults are the undefined `A__`), so the declarations look mangled; the
    # metadata "help" strings document each field's intent — verify names.
    a = field(
        default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
    a = field(
        default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
    a = field(
        default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
    a = field(
        default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
    a = field(default=2 , metadata={"help": "Batch size for training."} )
    a = field(default=2 , metadata={"help": "Batch size for evaluation."} )
    a = field(default=0.1 , metadata={"help": "Value of weight decay."} )
    a = field(
        default=1_00_00 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    a = field(default=2e-4 , metadata={"help": "Learning rate fo training."} )
    a = field(default="cosine" , metadata={"help": "Learning rate."} )
    a = field(
        default=7_50 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
    a = field(
        default=16 , metadata={"help": "Number of gradient accumulation steps."} )
    a = field(
        default=A__ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
    a = field(default=5_00_00 , metadata={"help": "Maximum number of training steps."} )
    a = field(
        default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
    a = field(default=10_24 , metadata={"help": "Sequence lengths used for training."} )
    a = field(default=1 , metadata={"help": "Training seed."} )
    a = field(
        default=10_24 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
    a = field(
        default=A__ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
    a = field(default=A__ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class UpperCAmelCase__ :
    """Arguments for evaluating a trained model's perplexity on a validation set."""

    a = field(
        default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
    a = field(
        default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
    a = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
    a = field(
        default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
    a = field(default=10_24 , metadata={"help": "Length of sequences to be evaluated."} )
    a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class UpperCAmelCase__ :
    """Arguments for HumanEval code-generation evaluation (sampling, generation
    and execution settings)."""

    a = field(
        default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
    a = field(default=A__ , metadata={"help": "Number of workers used for code evaluation."} )
    a = field(
        default=A__ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
    a = field(
        default=A__ , metadata={"help": "Sample from the language model's output distribution."} )
    a = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
    a = field(default=2_56 , metadata={"help": "Maximum number of newly generated tokens."} )
    a = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
    a = field(default=0.9_5 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
    a = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
    a = field(
        default=2_00 , metadata={"help": "Number of completions to generate for each sample."} )
    a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
    a = field(
        default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
    a = field(
        default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
    a = field(
        default=-1 , metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        } , )
@dataclass
class UpperCAmelCase__ :
    """Arguments for preprocessing/filtering the raw dataset (dedup thresholds,
    line-length filters, tokenizer used for the char/token ratio)."""

    a = field(
        default=A__ , metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        } , )
    a = field(
        default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
    a = field(
        default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
    a = field(
        default=10_00_00 , metadata={"help": "Number of files to save per JSON output file."} )
    a = field(default="content" , metadata={"help": "Column containing text data to process."} )
    a = field(
        default=10_00 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
    a = field(
        default=1_00 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
    a = field(
        default=0.2_5 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
    a = field(
        default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
    a = field(
        default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
    a = field(
        default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
    a = field(
        default=A__ , metadata={"help": "If True, near-duplicate samples are removed."} )
    a = field(
        default=0.8_5 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class UpperCAmelCase__ :
    """Arguments for training a new tokenizer from a base tokenizer on a text corpus."""

    a = field(
        default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
    a = field(
        default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
    a = field(default="content" , metadata={"help": "Column containing text data to process."} )
    a = field(default=20_00_00 , metadata={"help": "Number of examples to train tokenizer on."} )
    a = field(
        default=3_27_68 , metadata={"help": "Number of examples to train the tokenizer on."} )
    a = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
    a = field(default=A__ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class UpperCAmelCase__ :
    """Arguments for pretokenizing a dataset and pushing the result to a hub repo."""

    a = field(
        default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
    a = field(
        default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
    a = field(
        default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
    a = field(default=A__ , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class UpperCAmelCase__ :
    """Arguments for initializing a fresh model from a configuration and tokenizer."""

    a = field(
        default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
    a = field(
        default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
    a = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
    a = field(default=A__ , metadata={"help": "Push saved tokenizer to the hub."} )
| 472
| 0
|
"""simple docstring"""
def UpperCAmelCase(collection):
    """Stable counting sort.

    Args:
        collection (list[int]): integers to sort (may be empty or negative).

    Returns:
        list[int]: a new list with the elements in ascending order.
    """
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with it's predecessors. now, counting_arr[i] tells
    # us how many elements <= i has in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered
def UpperCAmelCase(string):
    """Sort the characters of ``string`` by code point using counting sort.

    Args:
        string (str): input text.

    Returns:
        str: a string containing the same characters in sorted order.
    """
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
    # Test string sort
    # NOTE(review): `counting_sort_string` / `counting_sort` are not defined
    # under these names above — verify the function names match.
    assert counting_sort_string('thisisthestring') == "eghhiiinrsssttt"
    # Interactive demo: sort comma-separated integers entered by the user.
    # NOTE(review): the input is bound to `__SCREAMING_SNAKE_CASE` but read as
    # `user_input` / `unsorted` below — names look mangled; verify.
    __SCREAMING_SNAKE_CASE = input('Enter numbers separated by a comma:\n').strip()
    __SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(',')]
    print(counting_sort(unsorted))
| 553
|
"""simple docstring"""
# limitations under the License.
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( UpperCamelCase ):
    # Unconditional image-generation pipeline: draws Gaussian noise and
    # iteratively denoises it with the registered UNet + scheduler. The extra
    # "This is a local test" return value marks this as a local custom pipeline.
    def __init__( self : str , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str ) -> Any:
        # NOTE(review): both parameters share the name `UpperCAmelCase`, which is a
        # SyntaxError as written — presumably (unet, scheduler); verify.
        super().__init__()
        self.register_modules(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
    @torch.no_grad()
    def __call__( self : int , UpperCAmelCase : int = 1 , UpperCAmelCase : Optional[torch.Generator] = None , UpperCAmelCase : int = 50 , UpperCAmelCase : Optional[str] = "pil" , UpperCAmelCase : bool = True , **UpperCAmelCase : Dict , ) -> Union[ImagePipelineOutput, Tuple]:
        # NOTE(review): the parameter list repeats `UpperCAmelCase` and the body
        # reads `batch_size`, `image`, `output_type`, `return_dict` — the original
        # names appear to have been mangled; verify before relying on behavior.
        # Start from standard-normal latents shaped by the UNet's config.
        lowerCAmelCase :Optional[Any] = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=UpperCAmelCase , )
        lowerCAmelCase :str = image.to(self.device )
        # set step values
        self.scheduler.set_timesteps(UpperCAmelCase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            lowerCAmelCase :Dict = self.unet(UpperCAmelCase , UpperCAmelCase ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            lowerCAmelCase :List[str] = self.scheduler.step(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ).prev_sample
        # Map latents from [-1, 1] to [0, 1] and move channels last for numpy/PIL.
        lowerCAmelCase :Any = (image / 2 + 0.5).clamp(0 , 1 )
        lowerCAmelCase :Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowerCAmelCase :Dict = self.numpy_to_pil(UpperCAmelCase )
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=UpperCAmelCase ), "This is a local test"
| 553
| 1
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
def __A ( _A , _A=False , _A=False ):
"""simple docstring"""
__a = "backbone." if is_semantic else ""
__a = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def __A ( _A , _A , _A=False , _A=False ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
__a = "backbone." if is_semantic else ""
# queries, keys and values
__a = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
__a = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
__a = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
__a = in_proj_weight[
: config.hidden_size, :
]
__a = q_bias
__a = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__a = in_proj_weight[
-config.hidden_size :, :
]
__a = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
__a = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
__a = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
__a = gamma_a
__a = gamma_a
def __A ( _A , _A , _A ):
"""simple docstring"""
__a = dct.pop(_A )
__a = val
def __A():
    """Download and return the standard COCO cats image used to sanity-check conversions."""
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __A(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """Convert an original DiT checkpoint to the HF BEiT layout, sanity-check
    the output shape, save it, and optionally push to the hub.

    Args:
        checkpoint_url (str): URL of the original ``.pth`` checkpoint.
        pytorch_dump_folder_path (str): output directory for the converted model.
        push_to_hub (bool): if True, also upload model + image processor.
    """
    # rvlcdip checkpoints are fine-tuned classifiers; all others are MIM-pretrained.
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False)
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point: parse checkpoint URL / output dir / push flag and convert.
    # NOTE(review): the parser and parsed args are bound to `SCREAMING_SNAKE_CASE`
    # but used as `parser` / `args` — names look mangled; verify.
    SCREAMING_SNAKE_CASE : int = argparse.ArgumentParser()
    parser.add_argument(
        """--checkpoint_url""",
        default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
        type=str,
        help="""URL to the original PyTorch checkpoint (.pth file).""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
    )
    parser.add_argument(
        """--push_to_hub""",
        action="""store_true""",
    )
    SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 719
|
# Function to print upper half of diamond (pyramid)
def __A ( _A ):
"""simple docstring"""
for i in range(0 , _A ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
for _ in range(0 , i + 1 ): # printing stars
print("* " , end="" )
print()
def __A ( _A ):
"""simple docstring"""
for i in range(_A , 0 , -1 ):
for _ in range(_A , 0 , -1 ): # printing stars
print("* " , end="" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(" " , end="" )
def __A ( _A ):
"""simple docstring"""
if n <= 0:
print(" ... .... nothing printing :(" )
return
floyd(_A ) # upper half
reverse_floyd(_A ) # lower half
if __name__ == "__main__":
    # Interactive demo loop: print a banner, then repeatedly read a size and draw.
    print(R"""| /\ | |- | |- |--| |\ /| |-""")
    print(R"""|/ \| |- |_ |_ |__| | \/ | |_""")
    # NOTE(review): the loop flag is bound to `SCREAMING_SNAKE_CASE` but tested as
    # `K`, and `pretty_print` is not defined under that name above — verify.
    SCREAMING_SNAKE_CASE : Tuple = 1
    while K:
        SCREAMING_SNAKE_CASE : Tuple = int(input("""enter the number and , and see the magic : """))
        print()
        pretty_print(user_number)
        SCREAMING_SNAKE_CASE : Optional[int] = int(input("""press 0 to exit... and 1 to continue..."""))
    print("""Good Bye...""")
| 525
| 0
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def _lowercase( __a : list[list[float]] ):
a__ =Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(__a ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
a__ =float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creates a copy of the matrix with swapped positions of the elements
a__ =[[0.0, 0.0], [0.0, 0.0]]
a__ , a__ =matrix[1][1], matrix[0][0]
a__ , a__ =-matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(__a ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(__a ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
a__ =float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('This matrix has no inverse.' )
# Creating cofactor matrix
a__ =[
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
a__ =(d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
a__ =-(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
a__ =(d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
a__ =-(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
a__ =(d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
a__ =-(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
a__ =(d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
a__ =-(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
a__ =(d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
a__ =array(__a )
for i in range(3 ):
for j in range(3 ):
a__ =cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
a__ =array(__a )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(__a )
# Calculate the inverse of the matrix
return [[float(d(__a ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
| 20
|
_lowerCAmelCase: List[str] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def _lowercase( ):
    '''Interactive Vigenère cipher driver: read a message, key and mode from
    stdin, then encrypt or decrypt and print the result.'''
    # NOTE(review): inputs are bound to `a__` but read below as `mode`,
    # `message`, `key`, and `encrypt_message`/`decrypt_message` are not defined
    # under those names in this file — names look mangled; verify.
    a__ =input('Enter message: ' )
    a__ =input('Enter key [alphanumeric]: ' )
    a__ =input('Encrypt/Decrypt [e/d]: ' )
    if mode.lower().startswith('e' ):
        a__ ='encrypt'
        a__ =encrypt_message(__a , __a )
    elif mode.lower().startswith('d' ):
        a__ ='decrypt'
        a__ =decrypt_message(__a , __a )
    print(f"""\n{mode.title()}ed message:""" )
    print(__a )
def _lowercase( __a : str , __a : str ):
return translate_message(__a , __a , 'encrypt' )
def _lowercase( __a : str , __a : str ):
return translate_message(__a , __a , 'decrypt' )
def _lowercase( __a : str , __a : str , __a : str ):
a__ =[]
a__ =0
a__ =key.upper()
for symbol in message:
a__ =LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(__a )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(__a ):
a__ =0
else:
translated.append(__a )
return "".join(__a )
if __name__ == "__main__":
main()
| 20
| 1
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class lowerCamelCase :
    """Mixin with save/load round-trip tests for feature extractors.

    Subclasses (which also inherit ``unittest.TestCase``) must provide
    ``feature_extraction_class`` and ``feat_extract_dict``.
    """

    # Placeholder overridden by concrete subclasses.
    UpperCAmelCase_ = None

    def test_feat_extract_to_json_string(self):
        """Serialized JSON must contain every configured attribute."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """Round-trip through ``to_json_file``/``from_json_file``."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """Round-trip through ``save_pretrained``/``from_pretrained``."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The extractor must be constructible with all-default arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
| 710
|
def _a ( SCREAMING_SNAKE_CASE__ : int = 4_00_00_00 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = [0, 1]
SCREAMING_SNAKE_CASE__ : Any = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
SCREAMING_SNAKE_CASE__ : List[Any] = 0
for j in range(len(SCREAMING_SNAKE_CASE__ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f"{solution() = }")
| 157
| 0
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase : Optional[int] =logging.get_logger(__name__) # pylint: disable=invalid-name
class __a(CLIPPreTrainedModel):
    """Paint-by-Example image encoder.

    Wraps a CLIP vision backbone, maps the pooled output through a small
    transformer (``PaintByExampleMapper``), and projects it to ``proj_size``.
    Also owns a learned unconditional embedding used for guidance scaling.
    """

    def __init__(self, config, proj_size=7_68):
        super().__init__(config)
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    # Named ``forward`` so nn.Module's __call__ dispatches here.
    def forward(self, pixel_values, return_uncond_vector=False):
        """Encode ``pixel_values``; optionally also return the uncond vector."""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        # Add a sequence dimension before the mapper: (batch, 1, hidden).
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector
        return latent_states
class PaintByExampleMapper(nn.Module):
    """Stack of transformer blocks mapping CLIP embeddings for Paint-by-Example.

    Named ``PaintByExampleMapper`` because the image encoder above
    instantiates it under that name.
    """

    def __init__(self, config):
        super().__init__()
        # One transformer block per ~5 CLIP hidden layers.
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        # NOTE(review): block dimensions and attention_bias=True reconstructed
        # from the reference Paint-by-Example architecture — confirm.
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True
                )
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        """Run the input through every transformer block in order."""
        for block in self.blocks:
            hidden_states = block(hidden_states)
        return hidden_states
| 228
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowerCamelCase : List[Any] =logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (x0, y0, x1, y1) box to the 0-1000 range.

    :param box: pixel coordinates [left, top, right, bottom]
    :param width: image width in pixels
    :param height: image height in pixels
    :return: list of four ints, each coordinate mapped to [0, 1000]
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image, lang, tesseract_config=None):
    """Run Tesseract OCR on an image.

    :param image: image array accepted by ``to_pil_image``
    :param lang: OCR language code passed to pytesseract (or None)
    :param tesseract_config: extra CLI flags for Tesseract (defaults to "")
    :return: (words, normalized_boxes) with boxes scaled to [0, 1000]
    """
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates (set: O(1) membership)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class __a(BaseImageProcessor):
    r"""
    Image processor for LayoutLM-style document models.

    Optionally resizes input images, runs Tesseract OCR to extract words and
    normalized bounding boxes, flips RGB to BGR, and packs everything into a
    :class:`BatchFeature`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 2_24, "width": 2_24}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image`` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # Delegates to the module-level `resize` from image_transforms.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Preprocess one image or a batch; per-call args override defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 228
| 1
|
"""Conditional exports for the UniDiffuser pipeline.

Falls back to dummy placeholder objects when either `torch` or
`transformers` is missing, mirroring the lazy-import pattern used
throughout the package.
"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    # The real implementations need both transformers and torch.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummies raise an informative ImportError when actually used.
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder

    # NOTE(review): `UTransformeraDModel` looks like a garbled identifier
    # (presumably `UTransformer2DModel`) — confirm against modeling_uvit.
    from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 701
|
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase(OnnxPipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for the ONNX Stable Diffusion img2img pipeline.

    Each test loads the tiny test checkpoint, optionally swaps the scheduler,
    runs 3 inference steps, and compares a corner slice of the output image
    against a recorded reference.
    """

    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        """Deterministic pipeline kwargs for a 128x128 img2img call."""
        image = floats_tensor((1, 3, 1_2_8, 1_2_8), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.7_5,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_2_8, 1_2_8, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase(unittest.TestCase):
    """Nightly GPU integration tests for the ONNX img2img pipeline.

    These download full checkpoints and the reference sketch image, so they
    only run under the nightly/onnxruntime/GPU markers.
    """

    @property
    def gpu_provider(self):
        """CUDA execution provider tuple with a capped memory arena."""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        """Session options with the memory pattern optimization disabled."""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((7_6_8, 5_1_2))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.7_5,
            guidance_scale=7.5,
            num_inference_steps=1_0,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((7_6_8, 5_1_2))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.7_5,
            guidance_scale=7.5,
            num_inference_steps=2_0,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 2_5_5:2_5_8, 3_8_3:3_8_6, -1]
        assert images.shape == (1, 5_1_2, 7_6_8, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 404
| 0
|
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    """Dummy model whose forward takes the ONNX inputs in contiguous order.

    `ensure_valid_input` inspects the `forward` signature by parameter name,
    so the names below must match the real model inputs.
    """

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None
class FuncNonContiguousArgs:
    """Dummy model with a non-input parameter interleaved in forward.

    `some_other_args` is not one of the provided tokens, so
    `ensure_valid_input` must stop after `input_ids`.
    """

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    """Tests for the graph-to-ONNX conversion utilities."""

    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        """Convert *model* and return the path of the exported ONNX file."""
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 341
|
"""simple docstring"""
class lowerCamelCase__ :
    """Fixed-capacity circular (ring buffer) queue.

    ``enqueue`` returns ``self`` so calls can be chained; ``dequeue``
    returns the removed element. ``first`` returns ``False`` on an empty
    queue (kept for backward compatibility) rather than raising.
    """

    def __init__(self, n):
        self.n = n                     # fixed capacity
        self.array = [None] * self.n   # backing storage
        self.front = 0                 # index of the first element
        self.rear = 0                  # index where the next element goes
        self.size = 0                  # number of stored elements

    def __len__(self):
        return self.size

    def is_empty(self):
        return self.size == 0

    def first(self):
        """Peek at the front element; ``False`` when the queue is empty."""
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        """Append *data* at the rear; raises when the queue is full."""
        if self.size >= self.n:
            raise Exception("QUEUE IS FULL")
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        """Remove and return the front element; raises on an empty queue."""
        if self.size == 0:
            raise Exception("UNDERFLOW")
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
| 341
| 1
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : int = logging.get_logger(__name__)
_lowercase : Tuple = torch.device("cpu")
def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
    """Return the recorded first five logits for a converted SwiftFormer.

    :param swiftformer_name: one of "swiftformer_xs"/"_s"/"_l1"/"_l3"
    :raises ValueError: for any other name (previously returned None silently)
    """
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])
    raise ValueError(f"Unknown SwiftFormer name: {swiftformer_name}")
def rename_key(dct, old, new):
    """Move the value stored under key *old* to key *new* in *dct* (in place)."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    """Map original SwiftFormer checkpoint keys to HuggingFace names.

    :param state_dict: original checkpoint (only ``.keys()`` is used)
    :return: list of (old_key, new_key) pairs
    """
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            # A digit in the third position marks a block inside a stage.
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
# NOTE(review): identifiers in this function are garbled — every assignment binds
# the throwaway local `lowerCamelCase__` while later lines read the intended names
# (`config`, `idalabel`, `checkpoint`, `rename_keys`, ...), and it calls helpers
# (`create_rename_keys`, `rename_key`, `prepare_img`, `get_expected_output`,
# `hf_hub_download`) that are not defined under those names in this file.
# Documented as written; confirm against the upstream SwiftFormer conversion
# script before relying on it.
def snake_case__ ( __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Any ):
    """Convert an original SwiftFormer checkpoint to the Hugging Face format.

    Positional arguments (by intent): the SwiftFormer variant name, the output
    directory for the converted model, and an optional path/URL to the original
    checkpoint. Runs under ``torch.no_grad`` because only weight copying and a
    single verification forward pass are performed.
    """
    lowerCamelCase__ : Any =SwiftFormerConfig()
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    # Fetch the ImageNet-1k id->label mapping from the Hub and wire it into the config.
    lowerCamelCase__ : List[str] =1000
    lowerCamelCase__ : Optional[int] ='''huggingface/label-files'''
    lowerCamelCase__ : Union[str, Any] ='''imagenet-1k-id2label.json'''
    lowerCamelCase__ : Optional[Any] =json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='''dataset''' ) , '''r''' ) )
    lowerCamelCase__ : Tuple ={int(__snake_case ): v for k, v in idalabel.items()}
    lowerCamelCase__ : List[Any] =idalabel
    lowerCamelCase__ : List[str] ={v: k for k, v in idalabel.items()}
    # size of the architecture
    # Per-variant depths and embedding dims (matches the published SwiftFormer configs).
    if swiftformer_name == "swiftformer_xs":
        lowerCamelCase__ : int =[3, 3, 6, 4]
        lowerCamelCase__ : Union[str, Any] =[48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        lowerCamelCase__ : int =[3, 3, 9, 6]
        lowerCamelCase__ : List[Any] =[48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        lowerCamelCase__ : str =[4, 3, 10, 5]
        lowerCamelCase__ : int =[48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        lowerCamelCase__ : str =[4, 4, 12, 6]
        lowerCamelCase__ : List[Any] =[64, 128, 320, 512]
    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith('''https''' ):
            lowerCamelCase__ : List[Any] =torch.hub.load_state_dict_from_url(__snake_case , map_location='''cpu''' , check_hash=__snake_case )
        else:
            lowerCamelCase__ : Tuple =torch.load(__snake_case , map_location='''cpu''' )
    lowerCamelCase__ : Union[str, Any] =checkpoint
    lowerCamelCase__ : Dict =create_rename_keys(__snake_case )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(__snake_case , __snake_case , __snake_case )
    # load HuggingFace model
    lowerCamelCase__ : Any =SwiftFormerForImageClassification(__snake_case ).eval()
    hf_model.load_state_dict(__snake_case )
    # prepare test inputs
    lowerCamelCase__ : List[str] =prepare_img()
    lowerCamelCase__ : int =ViTImageProcessor.from_pretrained('''preprocessor_config''' )
    lowerCamelCase__ : Optional[Any] =processor(images=__snake_case , return_tensors='''pt''' )
    # compare outputs from both models
    # Verify the converted model reproduces the reference logits before saving.
    lowerCamelCase__ : int =get_expected_output(__snake_case )
    lowerCamelCase__ : Tuple =hf_model(inputs['''pixel_values'''] ).logits
    assert hf_logits.shape == torch.Size([1, 1000] )
    assert torch.allclose(hf_logits[0, 0:5] , __snake_case , atol=1e-3 )
    Path(__snake_case ).mkdir(exist_ok=__snake_case )
    print(f'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
    hf_model.save_pretrained(__snake_case )
if __name__ == "__main__":
    # CLI entry point for the SwiftFormer conversion script.
    # NOTE(review): names here are garbled — the parser is bound to `_lowercase`
    # but used as `parser`, the parsed namespace is bound to `_lowercase` but
    # read as `args`, and `convert_swiftformer_checkpoint` is not defined under
    # that name in this file. Confirm against the upstream script.
    _lowercase : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    _lowercase : Optional[Any] = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 707
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class __SCREAMING_SNAKE_CASE :
    """A binary-tree node holding an integer payload and optional children.

    Fix: the original ``__init__`` bound the value and the two child slots to
    throwaway locals (``lowerCamelCase__``), so ``value``/``left``/``right``
    attributes were never created on the instance.
    """

    def __init__( self , lowerCamelCase : int ) -> None:
        # Store the payload; children start empty and are attached by the caller.
        self.value = lowerCamelCase
        self.left = None   # left child: Node | None
        self.right = None  # right child: Node | None
class __SCREAMING_SNAKE_CASE :
    """Iterable wrapper that yields the sum of all node values in a binary tree.

    Fixes: the original ``__init__`` bound the root to a local instead of
    ``self.tree``, and the recursive method called ``self.depth_first_search``,
    a name that does not exist on this class (the method is ``snake_case``).
    """

    def __init__( self , lowerCamelCase : "Node" ) -> None:
        # Keep a reference to the tree root for later traversal.
        self.tree = lowerCamelCase

    def snake_case ( self , lowerCamelCase : "Node | None" ) -> int:
        """Recursively sum ``value`` over the subtree rooted at ``lowerCamelCase``."""
        if lowerCamelCase is None:
            return 0
        return lowerCamelCase.value + (
            self.snake_case(lowerCamelCase.left ) + self.snake_case(lowerCamelCase.right )
        )

    def __iter__( self ) -> Iterator[int]:
        # Iterating the wrapper yields exactly one item: the whole-tree sum.
        yield self.snake_case(self.tree )
if __name__ == "__main__":
    # Run this module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| 625
| 0
|
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase (target , word_bank = None ):
    """Return every ordered combination of words from ``word_bank`` whose
    concatenation equals ``target`` (the classic *all_construct* problem).

    Fixes: the original signature repeated ``_lowercase`` for both parameters
    (a SyntaxError) and never bound ``table``/``new_combinations`` — every
    assignment targeted the throwaway name ``a__``.

    Args:
        target: string to be built.
        word_bank: candidate words; ``None``/empty means no words.

    Returns:
        list of word lists; each list concatenates to ``target``. An empty
        target yields ``[[]]`` (one empty combination).
    """
    word_bank = word_bank or []
    # table[i] holds every combination that builds target[:i].
    table_size = len(target ) + 1
    table = []
    for _ in range(table_size ):
        table.append([] )
    # seed value
    table[0] = [[]]  # the empty string has exactly one (empty) construction
    # iterate through the indices
    for i in range(table_size ):
        # only extend positions that are actually reachable
        if table[i] != []:
            for word in word_bank:
                # slice condition: does `word` start at position i?
                if target[i : i + len(word )] == word:
                    # prepend the word to every combination reaching i ...
                    new_combinations = [
                        [word, *way] for way in table[i]
                    ]
                    # ... and push them to the position the word reaches
                    table[i + len(word )] += new_combinations
    # combinations are built in reverse order, so reverse for readable output
    for combination in table[len(target )]:
        combination.reverse()
    return table[len(target )]
if __name__ == "__main__":
print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""]))
print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""]))
print(
all_construct(
"""hexagonosaurus""",
["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""],
)
)
| 331
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def _lowerCAmelCase (num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    """Create a beta schedule from a cumulative-alpha ("alpha bar") transform.

    Discretizes ``alpha_bar`` over ``num_diffusion_timesteps`` steps and derives
    ``beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i)``, clipped at ``max_beta``.

    Fixes: the original signature repeated ``_lowercase`` for all three
    parameters (a SyntaxError), the accumulator ``betas`` was never bound
    (assignments targeted ``a__``), and the dtype was the garbled
    ``torch.floataa`` (restored to ``torch.float32``).

    Args:
        num_diffusion_timesteps: number of betas to produce.
        max_beta: upper clip applied to each beta.
        alpha_transform_type: ``"cosine"`` or ``"exp"``.

    Returns:
        1-D ``torch.Tensor`` of ``num_diffusion_timesteps`` float32 betas.

    Raises:
        ValueError: for an unsupported ``alpha_transform_type``.
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )

    else:
        # keep the original (upstream) message verbatim
        raise ValueError(f'Unsupported alpha_tranform_type: {alpha_transform_type}' )

    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
class lowerCamelCase__ ( __lowerCamelCase , __lowerCamelCase ):
    """KDPM2-style (DPM-Solver-2 / Karras) discrete scheduler.

    NOTE(review): this class is garbled throughout — assignments bind the
    throwaway local ``a__`` while subsequent lines read instance attributes
    (``self.betas``, ``self.sigmas`` ...), ``__init__`` repeats the parameter
    name ``a__`` (a SyntaxError as written), and ``torch.floataa`` appears
    where ``torch.float32`` is presumably meant. Documented as written;
    confirm against the upstream diffusers ``KDPM2DiscreteScheduler``.
    """

    # NOTE(review): both class attributes share one name, so the second
    # assignment clobbers the first (upstream these are `_compatibles` and
    # the solver `order`).
    UpperCamelCase__ = [e.name for e in KarrasDiffusionSchedulers]
    UpperCamelCase__ = 2

    # Builds the beta schedule (linear / scaled-linear / squaredcos_cap_v2 or
    # user-supplied), derives cumulative alphas, then seeds the timesteps.
    @register_to_config
    def __init__( self : Optional[int] ,a__ : int = 10_00 ,a__ : float = 0.0_0085 ,a__ : float = 0.012 ,a__ : str = "linear" ,a__ : Optional[Union[np.ndarray, List[float]]] = None ,a__ : str = "epsilon" ,a__ : str = "linspace" ,a__ : int = 0 ,):
        if trained_betas is not None:
            a__ = torch.tensor(a__ ,dtype=torch.floataa )
        elif beta_schedule == "linear":
            a__ = torch.linspace(a__ ,a__ ,a__ ,dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            a__ = (
                torch.linspace(beta_start**0.5 ,beta_end**0.5 ,a__ ,dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            a__ = betas_for_alpha_bar(a__ )
        else:
            raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' )
        a__ = 1.0 - self.betas
        a__ = torch.cumprod(self.alphas ,dim=0 )
        # set all values
        self.set_timesteps(a__ ,a__ ,a__ )

    # Maps a timestep value to its index in the current schedule; disambiguates
    # duplicated timesteps via a per-timestep call counter.
    def lowerCAmelCase_ ( self : str ,a__ : str ,a__ : List[Any]=None ):
        if schedule_timesteps is None:
            a__ = self.timesteps
        a__ = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            a__ = 1 if len(a__ ) > 1 else 0
        else:
            a__ = timestep.cpu().item() if torch.is_tensor(a__ ) else timestep
            a__ = self._index_counter[timestep_int]
        return indices[pos].item()

    # init_noise_sigma-style property: scale of the initial noise distribution.
    @property
    def lowerCAmelCase_ ( self : List[Any] ):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    # Scales the model input by 1/sqrt(sigma^2 + 1) for the current step
    # (first- vs second-order state selects which sigma is used).
    def lowerCAmelCase_ ( self : Union[str, Any] ,a__ : torch.FloatTensor ,a__ : Union[float, torch.FloatTensor] ,):
        a__ = self.index_for_timestep(a__ )
        if self.state_in_first_order:
            a__ = self.sigmas[step_index]
        else:
            a__ = self.sigmas_interpol[step_index]
        a__ = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    # Computes the timestep grid ("linspace"/"leading"/"trailing"), the sigma
    # schedule, its log-space midpoint interpolation, and resets step state.
    def lowerCAmelCase_ ( self : int ,a__ : int ,a__ : Union[str, torch.device] = None ,a__ : Optional[int] = None ,):
        a__ = num_inference_steps
        a__ = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            a__ = np.linspace(0 ,num_train_timesteps - 1 ,a__ ,dtype=a__ )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            a__ = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            a__ = (np.arange(0 ,a__ ) * step_ratio).round()[::-1].copy().astype(a__ )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            a__ = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            a__ = (np.arange(a__ ,0 ,-step_ratio )).round().copy().astype(a__ )
            timesteps -= 1
        else:
            raise ValueError(
                f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
        a__ = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        a__ = torch.from_numpy(np.log(a__ ) ).to(a__ )
        a__ = np.interp(a__ ,np.arange(0 ,len(a__ ) ) ,a__ )
        a__ = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        a__ = torch.from_numpy(a__ ).to(device=a__ )
        # interpolate sigmas
        a__ = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp()
        a__ = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        a__ = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(a__ ).startswith("mps" ):
            # mps does not support float64
            a__ = torch.from_numpy(a__ ).to(a__ ,dtype=torch.floataa )
        else:
            a__ = torch.from_numpy(a__ ).to(a__ )
        # interpolate timesteps
        a__ = self.sigma_to_t(a__ ).to(a__ ,dtype=timesteps.dtype )
        a__ = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten()
        a__ = torch.cat([timesteps[:1], interleaved_timesteps] )
        a__ = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        a__ = defaultdict(a__ )

    # sigma -> continuous t via linear interpolation in log-sigma space.
    def lowerCAmelCase_ ( self : Dict ,a__ : Any ):
        # get log sigma
        a__ = sigma.log()
        # get distribution
        a__ = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        a__ = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        a__ = low_idx + 1
        a__ = self.log_sigmas[low_idx]
        a__ = self.log_sigmas[high_idx]
        # interpolate sigmas
        a__ = (low - log_sigma) / (low - high)
        a__ = w.clamp(0 ,1 )
        # transform interpolation to time range
        a__ = (1 - w) * low_idx + w * high_idx
        a__ = t.view(sigma.shape )
        return t

    # True while no first-order sample is buffered (i.e. next `step` call is
    # the first half of the two-stage KDPM2 update).
    @property
    def lowerCAmelCase_ ( self : Tuple ):
        return self.sample is None

    # One denoising step: first call computes a midpoint estimate, second call
    # completes the DPM-Solver-2 update using the buffered sample.
    def lowerCAmelCase_ ( self : Any ,a__ : Union[torch.FloatTensor, np.ndarray] ,a__ : Union[float, torch.FloatTensor] ,a__ : Union[torch.FloatTensor, np.ndarray] ,a__ : bool = True ,):
        a__ = self.index_for_timestep(a__ )
        # advance index counter by 1
        a__ = timestep.cpu().item() if torch.is_tensor(a__ ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            a__ = self.sigmas[step_index]
            a__ = self.sigmas_interpol[step_index + 1]
            a__ = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            a__ = self.sigmas[step_index - 1]
            a__ = self.sigmas_interpol[step_index]
            a__ = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        a__ = 0
        a__ = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            a__ = sigma_hat if self.state_in_first_order else sigma_interpol
            a__ = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            a__ = sigma_hat if self.state_in_first_order else sigma_interpol
            a__ = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample" )
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            a__ = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            a__ = sigma_interpol - sigma_hat
            # store for 2nd order step
            a__ = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            a__ = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            a__ = sigma_next - sigma_hat
            a__ = self.sample
            a__ = None
        a__ = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=a__ )

    # Forward-diffuses clean samples: x_noisy = x_0 + noise * sigma(t).
    def lowerCAmelCase_ ( self : str ,a__ : torch.FloatTensor ,a__ : torch.FloatTensor ,a__ : torch.FloatTensor ,):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        a__ = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(a__ ):
            # mps does not support float64
            a__ = self.timesteps.to(original_samples.device ,dtype=torch.floataa )
            a__ = timesteps.to(original_samples.device ,dtype=torch.floataa )
        else:
            a__ = self.timesteps.to(original_samples.device )
            a__ = timesteps.to(original_samples.device )
        a__ = [self.index_for_timestep(a__ ,a__ ) for t in timesteps]
        a__ = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            a__ = sigma.unsqueeze(-1 )
        a__ = original_samples + noise * sigma
        return noisy_samples

    def __len__( self : str ):
        # Length of the scheduler is the configured number of training steps.
        return self.config.num_train_timesteps
| 331
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
# Prompt template used when chatting with an agent; <<task>> is substituted later.
CHAT_MESSAGE_PROMPT = """
Human: <<task>>
Assistant: """

# Hub dataset repo holding the default prompt templates, and the per-mode files.
DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}

# Backward-compatible alias: the original rebound ``a_`` three times, so only
# the final (PROMPT_FILES) binding survived; keep that binding.
a_ = PROMPT_FILES
def a__ ( prompt_or_repo_id , agent_name , mode="run" ):
    """Return a prompt template, downloading it from the Hub when needed.

    Fix: the original signature repeated ``_UpperCamelCase`` for all three
    parameters (a SyntaxError); the body already read the intended names.

    Args:
        prompt_or_repo_id: a literal prompt (anything containing whitespace) or
            a Hub repo id; ``None`` falls back to ``DEFAULT_PROMPTS_REPO``.
        agent_name: reported in the download user-agent.
        mode: which template to fetch, ``"run"`` or ``"chat"``.

    Returns:
        The prompt text (either the literal argument or the file contents).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search('''\\s''' , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
    with open(prompt_file , '''r''' , encoding='''utf-8''' ) as f:
        return f.read()
| 709
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False


class __lowerCAmelCase ( unittest.TestCase ):
    """Intentionally empty fast-test suite; the real coverage lives in the
    nightly, GPU-gated suite below."""
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
    """Nightly, GPU-only integration tests for ``VersatileDiffusionPipeline``.

    NOTE(review): local names in these tests are garbled — results are bound to
    the throwaway name ``__lowerCamelCase`` but later read via the intended
    names (``pipe``, ``generator``, ``image`` ...), ``__UpperCAmelCase`` stands
    in for several distinct values (device, seeds, flags), all three test
    methods share the name ``lowerCamelCase`` (so only the last survives), and
    ``torch.floataa`` appears where ``torch.float16`` is presumably meant.
    Documented as written.
    """

    # tearDown-style cleanup: free VRAM between tests.
    def lowerCamelCase ( self ):
        '''Release GPU memory after each test.'''
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # Save/reload round-trip: dual-guided outputs must match after reload.
    def lowerCamelCase ( self ):
        '''Dual-guided generation must be reproducible across save_pretrained/from_pretrained.'''
        __lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        __lowerCamelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        __lowerCamelCase = torch.manual_seed(0 )
        __lowerCamelCase = pipe.dual_guided(
            prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(__UpperCAmelCase )
            __lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        __lowerCamelCase = generator.manual_seed(0 )
        __lowerCamelCase = pipe.dual_guided(
            prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
        assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"

    # End-to-end sanity: dual-guided, text-to-image and image-variation modes
    # are each compared against stored reference slices.
    def lowerCamelCase ( self ):
        '''Check all three pipeline modes against expected output slices.'''
        __lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
        pipe.to(__UpperCAmelCase )
        pipe.set_progress_bar_config(disable=__UpperCAmelCase )
        __lowerCamelCase = '''cyberpunk 2077'''
        __lowerCamelCase = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        __lowerCamelCase = torch.manual_seed(0 )
        __lowerCamelCase = pipe.dual_guided(
            prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
        __lowerCamelCase = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        __lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        __lowerCamelCase = '''A painting of a squirrel eating a burger '''
        __lowerCamelCase = torch.manual_seed(0 )
        __lowerCamelCase = pipe.text_to_image(
            prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
        __lowerCamelCase = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        __lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
        __lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
        __lowerCamelCase = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        __lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 622
| 0
|
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    """Holds the hyper-parameters used to exercise ``DPTImageProcessor`` in the
    test suite below.

    Fixes: the original ``__init__`` repeated ``UpperCAmelCase_`` for every
    parameter (a SyntaxError) and bound each value to a throwaway local instead
    of ``self.*``, so ``prepare_image_processor_dict`` could never read them.
    Mutable list defaults are replaced by a ``None`` sentinel so instances do
    not share the same list objects.
    """

    def __init__(self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=None , image_std=None , ) -> None:
        # Fall back to the defaults the original spelled inline.
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def SCREAMING_SNAKE_CASE_ (self) -> dict:
        """Return the kwargs dict used to instantiate the image processor."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __snake_case , unittest.TestCase ):
    """Test suite for ``DPTImageProcessor`` (properties, config round-trip, and
    batching over PIL / numpy / torch inputs).

    NOTE(review): identifiers are garbled — results are bound to the throwaway
    local ``lowerCamelCase__`` while later lines read the intended names
    (``image_processing``, ``encoded_images`` ...), the setUp-style method
    assigns the tester to a local instead of ``self.image_processor_tester``,
    it references ``DPTImageProcessingTester`` (not defined under that name in
    this file), and several methods share the name ``SCREAMING_SNAKE_CASE_``
    so only the last definition survives. Documented as written.
    """
    lowercase_ = DPTImageProcessor if is_vision_available() else None

    # setUp-style: build the hyper-parameter tester.
    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->str:
        '''Create the shared image-processor tester.'''
        lowerCamelCase__: str =DPTImageProcessingTester(self)

    @property
    def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->List[Any]:
        '''Kwargs used to instantiate the image processor under test.'''
        return self.image_processor_tester.prepare_image_processor_dict()

    # The processor must expose the standard configuration attributes.
    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Any:
        '''Check the expected properties exist on the processor.'''
        lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(lowerCAmelCase_ , "image_mean"))
        self.assertTrue(hasattr(lowerCAmelCase_ , "image_std"))
        self.assertTrue(hasattr(lowerCAmelCase_ , "do_normalize"))
        self.assertTrue(hasattr(lowerCAmelCase_ , "do_resize"))
        self.assertTrue(hasattr(lowerCAmelCase_ , "size"))

    # from_dict must honor both the stored size and an override.
    def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
        '''Check size handling when building the processor from a dict.'''
        lowerCamelCase__: Tuple =self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size , {"height": 18, "width": 18})
        lowerCamelCase__: str =self.image_processing_class.from_dict(self.image_processor_dict , size=42)
        self.assertEqual(image_processor.size , {"height": 42, "width": 42})

    # PIL inputs: single image and batch must produce the configured shape.
    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[str]:
        '''Batched/unbatched PIL image processing produces the expected shapes.'''
        lowerCamelCase__: Union[str, Any] =self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        lowerCamelCase__: Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_)
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase_ , Image.Image)
        # Test not batched input
        lowerCamelCase__: Optional[Any] =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase__: Any =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    # numpy inputs: same shape contract as PIL.
    def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
        '''Batched/unbatched numpy processing produces the expected shapes.'''
        lowerCamelCase__: Optional[Any] =self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        lowerCamelCase__: Dict =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_)
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase_ , np.ndarray)
        # Test not batched input
        lowerCamelCase__: Tuple =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase__: str =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    # torch inputs: same shape contract as PIL/numpy.
    def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Any:
        '''Batched/unbatched torch-tensor processing produces the expected shapes.'''
        lowerCamelCase__: Any =self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        lowerCamelCase__: str =prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_)
        for image in image_inputs:
            self.assertIsInstance(lowerCAmelCase_ , torch.Tensor)
        # Test not batched input
        lowerCamelCase__: Any =image_processing(image_inputs[0] , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        lowerCamelCase__: Union[str, Any] =image_processing(lowerCAmelCase_ , return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
| 59
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-module bootstrap for the Bloom model (standard transformers pattern):
# declare the import structure, then defer real imports to attribute access.
# NOTE(review): names are garbled — every structure assignment binds
# `_lowerCamelCase`, yet `_LazyModule` is handed `_import_structure`, which is
# never defined in this file; confirm against the upstream __init__.py.
_lowerCamelCase : Dict = {
    'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
# Tokenizer entries are only registered when `tokenizers` is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : List[Any] = ['BloomTokenizerFast']
# Modeling entries are only registered when `torch` is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCamelCase : Optional[Any] = [
        'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BloomForCausalLM',
        'BloomModel',
        'BloomPreTrainedModel',
        'BloomForSequenceClassification',
        'BloomForTokenClassification',
        'BloomForQuestionAnswering',
    ]
# Under static type checking, perform the real imports eagerly.
if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )
else:
    # At runtime, replace this module with a lazy proxy that imports submodules
    # on first attribute access.
    import sys

    _lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 121
| 0
|
"""simple docstring"""
# flake8: noqa
# Lint as: python3
# Public API surface of this utils package.
# NOTE(review): this list was almost certainly named ``__all__`` upstream; under
# the garbled name ``UpperCAmelCase_`` it no longer controls star-imports.
UpperCAmelCase_ : Tuple = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 165
|
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
UpperCAmelCase_ : List[str] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def _lowerCAmelCase(a : Tuple , a : int , a : Any=None ) -> Any:
if rng is None:
_SCREAMING_SNAKE_CASE =random.Random()
_SCREAMING_SNAKE_CASE =1
for dim in shape:
total_dims *= dim
_SCREAMING_SNAKE_CASE =[]
for _ in range(a ):
values.append(rng.randint(0 , vocab_size - 1 ) )
_SCREAMING_SNAKE_CASE =np.array(a , dtype=jnp.intaa ).reshape(a )
return output
def _lowerCAmelCase(shape , rng=None ) -> Any:
    """Create a random 0/1 attention mask of the given shape.

    The last column is forced to 1 so every batch row attends to at least one
    token.

    Fixes: the original signature repeated ``a`` for both parameters (a
    SyntaxError) and the mask write was garbled into a bare local assignment
    (``_SCREAMING_SNAKE_CASE =1``) instead of ``attn_mask[:, -1] = 1``.

    NOTE(review): calls ``ids_tensor``, which is not defined under that name in
    this file (the helper above was garbled to ``_lowerCAmelCase``); confirm
    against the upstream Flax generation test utilities.
    """
    attn_mask = ids_tensor(shape , vocab_size=2 , rng=rng )
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class __UpperCAmelCase :
'''simple docstring'''
lowercase : Union[str, Any] = None
lowercase : str = ()
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_SCREAMING_SNAKE_CASE =2
_SCREAMING_SNAKE_CASE =inputs['''input_ids'''].shape[-1] // 2
_SCREAMING_SNAKE_CASE =inputs['''input_ids'''][:max_batch_size, :sequence_length]
_SCREAMING_SNAKE_CASE =jnp.ones_like(_A )
_SCREAMING_SNAKE_CASE =attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_SCREAMING_SNAKE_CASE =input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_SCREAMING_SNAKE_CASE =config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =max_length
_SCREAMING_SNAKE_CASE =0
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model_class.__name__[4:] # Skip the "Flax" at the beginning
_SCREAMING_SNAKE_CASE =getattr(_A , _A )
_SCREAMING_SNAKE_CASE =pt_model_class(_A ).eval()
_SCREAMING_SNAKE_CASE =load_flax_weights_in_pytorch_model(_A , flax_model.params )
_SCREAMING_SNAKE_CASE =flax_model.generate(_A ).sequences
_SCREAMING_SNAKE_CASE =pt_model.generate(torch.tensor(_A , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_SCREAMING_SNAKE_CASE =flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
_SCREAMING_SNAKE_CASE =False
_SCREAMING_SNAKE_CASE =max_length
for model_class in self.all_generative_model_classes:
_SCREAMING_SNAKE_CASE =model_class(_A )
_SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
self.assertEqual(generation_outputs.shape[-1] , _A )
_SCREAMING_SNAKE_CASE =jit(model.generate )
_SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
    """Sampled generation: checks output length and eager/jit agreement.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``max_length`` and ``model`` below are undefined as written.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    # presumably: config.do_sample = True; config.max_length = max_length
    _SCREAMING_SNAKE_CASE =True
    _SCREAMING_SNAKE_CASE =max_length
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
        self.assertEqual(generation_outputs.shape[-1] , _A )
        _SCREAMING_SNAKE_CASE =jit(model.generate )
        _SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
    """Beam-search generation: checks output length and eager/jit agreement.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``max_length`` and ``model`` below are undefined as written.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    # presumably: config.do_sample = False; config.max_length = max_length;
    # config.num_beams = 2 -- TODO confirm
    _SCREAMING_SNAKE_CASE =False
    _SCREAMING_SNAKE_CASE =max_length
    _SCREAMING_SNAKE_CASE =2
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
        self.assertEqual(generation_outputs.shape[-1] , _A )
        _SCREAMING_SNAKE_CASE =jit(model.generate )
        _SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
    """Beam search with multiple return sequences: the batch dimension of the
    output must be ``batch_size * num_return_sequences``.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``max_length``, ``model`` and ``input_ids`` below are
    undefined as written.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    # presumably: config.do_sample = False; config.max_length = max_length;
    # config.num_beams = 2; config.num_return_sequences = 2 -- TODO confirm
    _SCREAMING_SNAKE_CASE =False
    _SCREAMING_SNAKE_CASE =max_length
    _SCREAMING_SNAKE_CASE =2
    _SCREAMING_SNAKE_CASE =2
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
        self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def UpperCamelCase_ ( self ):
    """Sampled generation with logits warpers (temperature / top-k / top-p /
    forced BOS-EOS): checks output length and eager/jit agreement.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``max_length`` and ``model`` below are undefined as written.
    The constants below presumably map to config fields (temperature=0.8,
    top_k=10, top_p=0.3, min_length=1, forced_bos_token_id=8,
    forced_eos_token_id=9) -- TODO confirm.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    _SCREAMING_SNAKE_CASE =True
    _SCREAMING_SNAKE_CASE =max_length
    _SCREAMING_SNAKE_CASE =0.8
    _SCREAMING_SNAKE_CASE =1_0
    _SCREAMING_SNAKE_CASE =0.3
    _SCREAMING_SNAKE_CASE =1
    _SCREAMING_SNAKE_CASE =8
    _SCREAMING_SNAKE_CASE =9
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
        self.assertEqual(generation_outputs.shape[-1] , _A )
        _SCREAMING_SNAKE_CASE =jit(model.generate )
        _SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
    """Greedy generation with logits processors (min length, forced BOS/EOS
    tokens): checks output length and eager/jit agreement.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``max_length`` and ``model`` below are undefined as written.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    # presumably: config.max_length, config.min_length = 1,
    # config.forced_bos_token_id = 8, config.forced_eos_token_id = 9 -- TODO confirm
    _SCREAMING_SNAKE_CASE =max_length
    _SCREAMING_SNAKE_CASE =1
    _SCREAMING_SNAKE_CASE =8
    _SCREAMING_SNAKE_CASE =9
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
        self.assertEqual(generation_outputs.shape[-1] , _A )
        _SCREAMING_SNAKE_CASE =jit(model.generate )
        _SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
    """Beam search with logits processors (min length, forced BOS/EOS tokens):
    checks output length and eager/jit agreement.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``max_length`` and ``model`` below are undefined as written.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    # presumably: config.max_length, config.num_beams = 2,
    # config.min_length = 1, forced bos/eos token ids = 8/9 -- TODO confirm
    _SCREAMING_SNAKE_CASE =max_length
    _SCREAMING_SNAKE_CASE =2
    _SCREAMING_SNAKE_CASE =1
    _SCREAMING_SNAKE_CASE =8
    _SCREAMING_SNAKE_CASE =9
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A ).sequences
        self.assertEqual(generation_outputs.shape[-1] , _A )
        _SCREAMING_SNAKE_CASE =jit(model.generate )
        _SCREAMING_SNAKE_CASE =jit_generate(_A ).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
    """Greedy generation with a left-padded attention mask: checks output
    length and eager/jit agreement.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``attention_mask``, ``max_length`` and ``model`` below are
    undefined as written.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    # pad attention mask on the left
    _SCREAMING_SNAKE_CASE =attention_mask.at[(0, 0)].set(0 )
    # presumably: config.do_sample = False; config.max_length = max_length
    _SCREAMING_SNAKE_CASE =False
    _SCREAMING_SNAKE_CASE =max_length
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A , attention_mask=_A ).sequences
        self.assertEqual(generation_outputs.shape[-1] , _A )
        _SCREAMING_SNAKE_CASE =jit(model.generate )
        _SCREAMING_SNAKE_CASE =jit_generate(_A , attention_mask=_A ).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
    """Sampled generation with a left-padded attention mask: checks output
    length and eager/jit agreement.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``attention_mask``, ``max_length`` and ``model`` below are
    undefined as written.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    # pad attention mask on the left
    _SCREAMING_SNAKE_CASE =attention_mask.at[(0, 0)].set(0 )
    # presumably: config.do_sample = True; config.max_length = max_length
    _SCREAMING_SNAKE_CASE =True
    _SCREAMING_SNAKE_CASE =max_length
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A , attention_mask=_A ).sequences
        self.assertEqual(generation_outputs.shape[-1] , _A )
        _SCREAMING_SNAKE_CASE =jit(model.generate )
        _SCREAMING_SNAKE_CASE =jit_generate(_A , attention_mask=_A ).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def UpperCamelCase_ ( self ):
    """Beam-search generation with a left-padded attention mask: checks
    output length and eager/jit agreement.

    NOTE(review): locals were collapsed to ``_SCREAMING_SNAKE_CASE`` by the
    obfuscation; ``attention_mask``, ``max_length`` and ``model`` below are
    undefined as written.
    """
    _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self._get_input_ids_and_config()
    # pad attention mask on the left
    _SCREAMING_SNAKE_CASE =attention_mask.at[(0, 0)].set(0 )
    # presumably: config.num_beams = 2; config.max_length = max_length
    _SCREAMING_SNAKE_CASE =2
    _SCREAMING_SNAKE_CASE =max_length
    for model_class in self.all_generative_model_classes:
        _SCREAMING_SNAKE_CASE =model_class(_A )
        _SCREAMING_SNAKE_CASE =model.generate(_A , attention_mask=_A ).sequences
        self.assertEqual(generation_outputs.shape[-1] , _A )
        _SCREAMING_SNAKE_CASE =jit(model.generate )
        _SCREAMING_SNAKE_CASE =jit_generate(_A , attention_mask=_A ).sequences
        self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
    """Integration tests: ``generate()`` must validate its keyword arguments.

    NOTE(review): call arguments were obfuscated to ``_A`` (undefined), so
    the ``assertRaisesRegex`` exception types and several ``generate``
    arguments are lost -- restore the original identifiers before running.
    """

    def UpperCamelCase_ ( self ):
        """Unknown kwargs (typos or arbitrary names) must raise."""
        _SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
        _SCREAMING_SNAKE_CASE =FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
        _SCREAMING_SNAKE_CASE ='''Hello world'''
        _SCREAMING_SNAKE_CASE =tokenizer(_A , return_tensors='''np''' ).input_ids
        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(_A , '''do_samples''' ):
            model.generate(_A , do_samples=_A )
        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(_A , '''foo''' ):
            _SCREAMING_SNAKE_CASE ={'''foo''': '''bar'''}
            model.generate(_A , **_A )
| 165
| 1
|
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 436
|
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class SCREAMING_SNAKE_CASE__ :
    """Builds tiny MPNet configs/inputs and checks each model head's output
    shapes (standard transformers ``ModelTester`` pattern).

    NOTE(review): this class was mechanically renamed.  All ``__init__``
    parameters are named ``a_`` (duplicate argument names are a SyntaxError),
    every method is named ``A`` (each later def shadows the previous one),
    and locals are collapsed into ``__snake_case`` so names read later
    (``parent``, ``input_ids``, ``config_and_inputs``, ``result``, ...) are
    undefined as written.  Restore the original identifiers before running.
    """

    def __init__( self : str , a_ : Any , a_ : Union[str, Any]=13 , a_ : Any=7 , a_ : Any=True , a_ : Dict=True , a_ : Union[str, Any]=False , a_ : Tuple=True , a_ : str=99 , a_ : Tuple=64 , a_ : Tuple=5 , a_ : Union[str, Any]=4 , a_ : Dict=64 , a_ : Union[str, Any]="gelu" , a_ : Dict=0.1 , a_ : List[str]=0.1 , a_ : Dict=512 , a_ : Tuple=16 , a_ : str=2 , a_ : Any=0.02 , a_ : List[Any]=3 , a_ : Tuple=4 , a_ : Optional[int]=None , ):
        """Store the tester hyper-parameters (batch size, seq length, model dims)."""
        __snake_case = parent
        __snake_case = batch_size
        __snake_case = seq_length
        __snake_case = is_training
        __snake_case = use_input_mask
        __snake_case = use_token_type_ids
        __snake_case = use_labels
        __snake_case = vocab_size
        __snake_case = hidden_size
        __snake_case = num_hidden_layers
        __snake_case = num_attention_heads
        __snake_case = intermediate_size
        __snake_case = hidden_act
        __snake_case = hidden_dropout_prob
        __snake_case = attention_probs_dropout_prob
        __snake_case = max_position_embeddings
        __snake_case = type_vocab_size
        __snake_case = type_sequence_label_size
        __snake_case = initializer_range
        __snake_case = num_labels
        __snake_case = num_choices
        __snake_case = scope

    def A ( self : int ):
        """Return the published full-size config (for slow/large-model tests)."""
        return MPNetConfig.from_pretrained("microsoft/mpnet-base" )

    def A ( self : str ):
        """Build random input ids, mask and labels plus a small config."""
        __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __snake_case = None
        if self.use_input_mask:
            __snake_case = random_attention_mask([self.batch_size, self.seq_length] )
        __snake_case = None
        __snake_case = None
        __snake_case = None
        if self.use_labels:
            __snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __snake_case = ids_tensor([self.batch_size] , self.num_choices )
        __snake_case = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def A ( self : List[str] ):
        """Build the small MPNetConfig used by all checks."""
        return MPNetConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def A ( self : Tuple , a_ : int , a_ : str , a_ : Optional[int] , a_ : List[Any] , a_ : str , a_ : Optional[Any] ):
        """Check base-model output shapes (hidden states and pooler)."""
        __snake_case = MPNetModel(config=a_ )
        model.to(a_ )
        model.eval()
        __snake_case = model(a_ , a_ )
        __snake_case = model(a_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def A ( self : Any , a_ : int , a_ : Tuple , a_ : str , a_ : int , a_ : str , a_ : List[Any] ):
        """Check question-answering head output shapes (start/end logits)."""
        __snake_case = MPNetForQuestionAnswering(config=a_ )
        model.to(a_ )
        model.eval()
        __snake_case = model(
            a_ , attention_mask=a_ , start_positions=a_ , end_positions=a_ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def A ( self : Any , a_ : Any , a_ : int , a_ : Union[str, Any] , a_ : Dict , a_ : Optional[Any] , a_ : Any ):
        """Check sequence-classification head output shape."""
        __snake_case = self.num_labels
        __snake_case = MPNetForSequenceClassification(a_ )
        model.to(a_ )
        model.eval()
        __snake_case = model(a_ , attention_mask=a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A ( self : Optional[Any] , a_ : Any , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : Union[str, Any] , a_ : List[Any] , a_ : List[Any] ):
        """Check multiple-choice head output shape (inputs expanded per choice)."""
        __snake_case = self.num_choices
        __snake_case = MPNetForMultipleChoice(config=a_ )
        model.to(a_ )
        model.eval()
        __snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __snake_case = model(
            a_ , attention_mask=a_ , labels=a_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def A ( self : Dict , a_ : List[str] , a_ : str , a_ : Union[str, Any] , a_ : str , a_ : Optional[int] , a_ : Optional[Any] ):
        """Check token-classification head output shape."""
        __snake_case = self.num_labels
        __snake_case = MPNetForTokenClassification(config=a_ )
        model.to(a_ )
        model.eval()
        __snake_case = model(a_ , attention_mask=a_ , labels=a_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A ( self : List[Any] ):
        """Return (config, inputs_dict) for the common model tests."""
        __snake_case = self.prepare_config_and_inputs()
        ((__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case) , (__snake_case)) = config_and_inputs
        __snake_case = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
    """Common + pipeline test suite for the MPNet model family.

    NOTE(review): mechanically renamed -- the base mixins are written as
    ``_UpperCamelCase`` (presumably ModelTesterMixin / PipelineTesterMixin),
    ``setUp`` references ``MPNetModelTester`` which is not defined under that
    name in this file, and the ``create_and_check_*`` tester methods called
    below do not exist on the obfuscated tester class.  Restore the original
    identifiers before running.
    """

    # models exercised by the common tests
    __SCREAMING_SNAKE_CASE = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    # pipeline-task -> model mapping for the pipeline tests
    __SCREAMING_SNAKE_CASE = (
        {
            """feature-extraction""": MPNetModel,
            """fill-mask""": MPNetForMaskedLM,
            """question-answering""": MPNetForQuestionAnswering,
            """text-classification""": MPNetForSequenceClassification,
            """token-classification""": MPNetForTokenClassification,
            """zero-shot""": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # two boolean test flags -- original names lost in the renaming
    # (likely ``test_head_masking`` / ``test_pruning`` or similar -- TODO confirm)
    __SCREAMING_SNAKE_CASE = False
    __SCREAMING_SNAKE_CASE = True

    def A ( self : List[Any] ):
        """Create the tester and config tester fixtures."""
        __snake_case = MPNetModelTester(self )
        __snake_case = ConfigTester(self , config_class=a_ , hidden_size=37 )

    def A ( self : List[Any] ):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def A ( self : List[Any] ):
        """Base model shape check."""
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*a_ )

    def A ( self : Dict ):
        """Sequence-classification head shape check."""
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*a_ )

    def A ( self : List[Any] ):
        """Multiple-choice head shape check."""
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*a_ )

    def A ( self : int ):
        """Token-classification head shape check."""
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*a_ )

    def A ( self : Union[str, Any] ):
        """Question-answering head shape check."""
        __snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*a_ )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: a fixed input through ``microsoft/mpnet-base``
    must reproduce known reference values.

    NOTE(review): locals were collapsed to ``__snake_case`` and call
    arguments to ``a_`` (both undefined as written) -- restore the original
    identifiers before running.
    """

    @slow
    def A ( self : Optional[Any] ):
        """Compare a 3x3 output slice against hard-coded reference values."""
        __snake_case = MPNetModel.from_pretrained("microsoft/mpnet-base" )
        __snake_case = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        __snake_case = model(a_ )[0]
        __snake_case = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , a_ )
        __snake_case = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
| 69
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {'''openai-gpt''': '''https://huggingface.co/openai-gpt/resolve/main/config.json'''}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration class for the original OpenAI GPT model.

    The defaults reproduce the published ``openai-gpt`` checkpoint
    (vocab 40478, 512 positions, 768 hidden, 12 layers, 12 heads).
    """

    # Fix: the base class was the undefined name ``__lowercase``; the only
    # config base imported in this file is ``PretrainedConfig``.
    # Fix: both class attributes were named ``UpperCAmelCase__`` (the second
    # silently shadowed the first); ``PretrainedConfig`` reads these exact
    # attribute names.
    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=4_0_4_7_8,          # BPE vocabulary size
        n_positions=5_1_2,             # maximum sequence length
        n_embd=7_6_8,                  # embedding / hidden size
        n_layer=1_2,                   # number of transformer blocks
        n_head=1_2,                    # attention heads per block
        afn="gelu",                    # activation function name
        resid_pdrop=0.1,               # residual dropout
        embd_pdrop=0.1,                # embedding dropout
        attn_pdrop=0.1,                # attention dropout
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",      # sequence-summary settings below
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        """Store the model hyper-parameters and forward ``kwargs`` to the base.

        Fix: every parameter was named ``_a`` (duplicate argument names are a
        SyntaxError); names restored 1:1 from the attribute assignments.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 578
|
import re
from filelock import FileLock
try:
import nltk
a__ = True
except (ImportError, ModuleNotFoundError):
a__ = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def __UpperCAmelCase ( __a : str ) -> str:
    """Insert a newline between the sentences of ``__a``.

    The pegasus newline marker ``<n>`` is stripped first, then nltk's punkt
    tokenizer splits the text into sentences joined with ``"\\n"``.
    """
    # Fix: ``re.sub`` returns a new string -- the original call discarded
    # the result, so the pegasus marker was never actually removed.
    __a = re.sub('''<n>''' ,'''''' ,__a )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(__a ) )
| 578
| 1
|
"""simple docstring"""
def snake_case ( A__ ):
    """Return ``True`` when the integer ``A__`` is "bouncy".

    A number is bouncy when its digits are neither monotonically
    non-decreasing nor monotonically non-increasing (Project Euler 112).

    Raises:
        ValueError: if ``A__`` is not an ``int``.
    """
    # Fix: the original referenced the undefined name ``A_`` throughout and
    # called ``isinstance(A_, A_)`` (a value is not a type); it also returned
    # ``sorted_str_n``/``str_n`` which were never bound because both
    # assignments targeted the same obfuscated local.
    if not isinstance(A__ , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(A__ )
    sorted_str_n = "".join(sorted(str_n ) )
    # neither fully ascending nor fully descending => bouncy
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case ( A__ = 99 ):
    """Return the least number for which the proportion of bouncy numbers
    from 1 upward first reaches ``A__`` percent (Project Euler 112).

    Raises:
        ValueError: if ``A__`` is not strictly between 0 and 100.
    """
    if not 0 < A__ < 1_00:
        raise ValueError("solution() only accepts values from 0 to 100" )

    def _is_bouncy(value):
        # Fix: the original called the undefined name ``check_bouncy``; the
        # check is inlined here so the function is self-contained.  A number
        # is bouncy when its digits are neither sorted ascending nor
        # descending.
        digits = str(value )
        ascending = "".join(sorted(digits ) )
        return ascending != digits and ascending[::-1] != digits

    bouncy_num = 0
    num = 1
    while True:
        if _is_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 1_00 >= A__:
            return num
        num += 1
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fix: the original printed ``solution(99)`` but no function named
    # ``solution`` exists in this file; the solver above is ``snake_case``.
    print(f'{snake_case(99)}')
| 95
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _a ( unittest.TestCase):
"""simple docstring"""
def __init__( self: Dict , __lowerCamelCase: Any , __lowerCamelCase: Optional[int]=7 , __lowerCamelCase: Any=3 , __lowerCamelCase: List[str]=18 , __lowerCamelCase: List[Any]=30 , __lowerCamelCase: Tuple=400 , __lowerCamelCase: List[str]=True , __lowerCamelCase: Any=None , __lowerCamelCase: int=True , __lowerCamelCase: Any=None , __lowerCamelCase: Dict=True , __lowerCamelCase: List[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase: Optional[Any]=[0.5, 0.5, 0.5] , __lowerCamelCase: int=False , ):
'''simple docstring'''
UpperCamelCase__: Optional[int] = size if size is not None else {"height": 20, "width": 20}
UpperCamelCase__: List[Any] = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCamelCase__: Optional[int] = parent
UpperCamelCase__: int = batch_size
UpperCamelCase__: int = num_channels
UpperCamelCase__: str = image_size
UpperCamelCase__: Any = min_resolution
UpperCamelCase__: Union[str, Any] = max_resolution
UpperCamelCase__: Optional[Any] = do_resize
UpperCamelCase__: Any = size
UpperCamelCase__: str = do_center_crop
UpperCamelCase__: Any = crop_size
UpperCamelCase__: Any = do_normalize
UpperCamelCase__: int = image_mean
UpperCamelCase__: Tuple = image_std
UpperCamelCase__: int = do_reduce_labels
def UpperCAmelCase_ ( self: Tuple ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def lowerCAmelCase_ ( ):
    """Load one (image, segmentation map) pair from the ADE20k test fixtures.

    Requires network/Hub access via ``load_dataset``.
    """
    # Fix: every local was assigned to an obfuscated throwaway name while the
    # lines below read ``dataset`` (NameError); the names are restored from
    # the return statement.  ``map`` shadows the builtin, but it is the name
    # the original return statement uses.
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test")
    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])
    return image, map
def lowerCAmelCase_ ( ):
    """Load two (image, segmentation map) pairs from the ADE20k test fixtures.

    Returns ``([image1, image2], [map1, map2])``.  Requires network/Hub
    access via ``load_dataset``.
    """
    # Fix: all four ``Image.open`` results were bound to the same obfuscated
    # local and the return referenced undefined names; distinct locals
    # restored.  Images sit at even dataset indices, maps at odd indices
    # (same layout as the single-pair loader above).
    ds = load_dataset("hf-internal-testing/fixtures_ade20k" ,split="test")
    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])
    return [image1, image2], [map1, map2]
@require_torch
@require_vision
class _a ( UpperCamelCase__ , unittest.TestCase):
    """Unit tests for the BEiT image processor (PIL / numpy / torch inputs,
    semantic-segmentation maps, label reduction).

    NOTE(review): this class was mechanically renamed.  Locals are collapsed
    into ``UpperCamelCase__`` (so names read later -- ``image_processing``,
    ``encoded_images``, ``encoding``, ``image``, ``maps`` -- are undefined as
    written), call arguments are ``__lowerCamelCase``, and the helpers
    ``BeitImageProcessingTester`` / ``prepare_semantic_single_inputs`` /
    ``prepare_semantic_batch_inputs`` do not exist under those names in this
    file.  Restore the original identifiers before running.
    """

    # the processor class under test (None when vision deps are missing)
    UpperCamelCase__ = BeitImageProcessor if is_vision_available() else None

    def UpperCAmelCase_ ( self: Optional[int] ):
        """Create the shared tester fixture."""
        UpperCamelCase__: str = BeitImageProcessingTester(self )

    @property
    def UpperCAmelCase_ ( self: Any ):
        """Kwargs dict used to build the processor under test."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def UpperCAmelCase_ ( self: Dict ):
        """The processor exposes all expected configuration attributes."""
        UpperCamelCase__: int = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "do_center_crop" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "center_crop" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
        self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )

    def UpperCAmelCase_ ( self: Optional[int] ):
        """``from_dict`` honours defaults and keyword overrides."""
        UpperCamelCase__: Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 20, "width": 20} )
        self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18} )
        self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase )
        UpperCamelCase__: int = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=__lowerCamelCase )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
        self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84} )
        self.assertEqual(image_processor.do_reduce_labels , __lowerCamelCase )

    def UpperCAmelCase_ ( self: Tuple ):
        """Intentionally empty placeholder (overrides a mixin test)."""
        pass

    def UpperCAmelCase_ ( self: Dict ):
        """PIL inputs: single image and batch produce crop-sized tensors."""
        UpperCamelCase__: Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        UpperCamelCase__: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , Image.Image )
        # Test not batched input
        UpperCamelCase__: List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase__: str = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def UpperCAmelCase_ ( self: Dict ):
        """numpy inputs: single image and batch produce crop-sized tensors."""
        UpperCamelCase__: Optional[Any] = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        UpperCamelCase__: List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , np.ndarray )
        # Test not batched input
        UpperCamelCase__: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase__: Dict = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def UpperCAmelCase_ ( self: Union[str, Any] ):
        """torch inputs: single image and batch produce crop-sized tensors."""
        UpperCamelCase__: int = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase__: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , torch.Tensor )
        # Test not batched input
        UpperCamelCase__: Dict = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        # Test batched
        UpperCamelCase__: str = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )

    def UpperCAmelCase_ ( self: Tuple ):
        """Segmentation maps: labels tensor shape, dtype and 0..255 range for
        torch inputs and for single/batched PIL inputs."""
        UpperCamelCase__: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        UpperCamelCase__: Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
        UpperCamelCase__: str = []
        for image in image_inputs:
            self.assertIsInstance(__lowerCamelCase , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )
        # Test not batched input
        UpperCamelCase__: Dict = image_processing(image_inputs[0] , maps[0] , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(
            encoding["labels"].shape , (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )
        # Test batched
        UpperCamelCase__: Any = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(
            encoding["labels"].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )
        # Test not batched input (PIL images)
        UpperCamelCase__ , UpperCamelCase__: str = prepare_semantic_single_inputs()
        UpperCamelCase__: Any = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(
            encoding["labels"].shape , (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )
        # Test batched input (PIL images)
        UpperCamelCase__ , UpperCamelCase__: List[str] = prepare_semantic_batch_inputs()
        UpperCamelCase__: Optional[int] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
        self.assertEqual(
            encoding["pixel_values"].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(
            encoding["labels"].shape , (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ) , )
        self.assertEqual(encoding["labels"].dtype , torch.long )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )

    def UpperCAmelCase_ ( self: Any ):
        """Label reduction: without reduction labels stay within the 150
        ADE20k classes; with reduction they may reach 255 (ignore index)."""
        UpperCamelCase__: Dict = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        UpperCamelCase__ , UpperCamelCase__: Any = prepare_semantic_single_inputs()
        UpperCamelCase__: int = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 150 )
        # presumably: image_processing.do_reduce_labels = True -- TODO confirm
        UpperCamelCase__: List[Any] = True
        UpperCamelCase__: List[str] = image_processing(__lowerCamelCase , __lowerCamelCase , return_tensors="pt" )
        self.assertTrue(encoding["labels"].min().item() >= 0 )
        self.assertTrue(encoding["labels"].max().item() <= 255 )
| 380
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowercase ( ModelMixin , ConfigMixin ):
    """Learned per-dimension mean/std used to normalize ("scale") and
    de-normalize ("unscale") embedding vectors of width 768 by default.

    NOTE(review): the three public methods below were all renamed to
    ``lowerCAmelCase__`` by the obfuscation, so later defs shadow earlier
    ones (originals were likely ``to`` / ``scale`` / ``unscale``).  The
    names are kept as-is here to avoid guessing the public interface.
    """

    # Fix: the class listed the undefined name ``A__`` twice as its bases
    # (duplicate bases are a TypeError); the only mixins imported in this
    # file are ModelMixin and ConfigMixin.
    @register_to_config
    def __init__( self , UpperCamelCase_ = 768 , ):
        """Create zero-mean / unit-std parameters of shape (1, UpperCamelCase_)."""
        super().__init__()
        # Fix: the parameters were assigned to throwaway locals while the
        # methods below read ``self.mean`` / ``self.std``.
        self.mean = nn.Parameter(torch.zeros(1 , UpperCamelCase_ ) )
        self.std = nn.Parameter(torch.ones(1 , UpperCamelCase_ ) )

    def lowerCAmelCase__ ( self , device = None , dtype = None ):
        """Move the mean/std parameters to ``device`` and cast to ``dtype``.

        Fix: both parameters were named ``UpperCamelCase_`` (duplicate
        argument names are a SyntaxError).
        """
        self.mean = nn.Parameter(self.mean.to(device ).to(dtype ) )
        self.std = nn.Parameter(self.std.to(device ).to(dtype ) )
        return self

    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """Normalize: ``(embeds - mean) / std``."""
        UpperCamelCase_ = (UpperCamelCase_ - self.mean) * 1.0 / self.std
        return UpperCamelCase_

    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """De-normalize: ``embeds * std + mean``."""
        UpperCamelCase_ = (UpperCamelCase_ * self.std) + self.mean
        return UpperCamelCase_
| 280
|
'''simple docstring'''
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
__snake_case = logging.get_logger(__name__)
class lowercase :
    """Abstract interface for a hyper-parameter search backend.

    Subclasses set ``name`` (and optionally ``pip_package``) and implement
    ``is_available`` / ``run`` / ``default_hp_space``.
    """

    # Fix: both attributes were mangled to ``_a`` (the second shadowed the
    # first); the methods below read ``self.name`` / ``cls.pip_package`` /
    # ``cls.name``, so those are the required attribute names.
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        """Return whether the backend's package is importable.

        Fix: ``ensure_available`` below calls ``self.is_available()``, so
        this is the method's required name (it was obfuscated).
        """
        raise NotImplementedError

    def run(self, trainer, n_trials, direction, **kwargs):
        """Execute the hyper-parameter search.

        Fix: the original signature named every parameter ``UpperCamelCase_``
        (duplicate argument names are a SyntaxError).
        """
        raise NotImplementedError

    def default_hp_space(self, trial):
        """Return the default hyper-parameter search space for ``trial``."""
        raise NotImplementedError

    def ensure_available(self):
        """Raise ``RuntimeError`` when the backend's package is not installed."""
        if not self.is_available():
            raise RuntimeError(
                F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )

    @classmethod
    def pip_install(cls):
        """Return the pip command that installs this backend.

        Fix: ``ensure_available`` calls ``self.pip_install()``, so this is
        the method's required name (it was obfuscated).
        """
        return F'''`pip install {cls.pip_package or cls.name}`'''
class lowercase ( A__ ):
    """Optuna hyper-parameter search backend."""

    # Fix: the attribute was mangled to ``_a``; the base class reads
    # ``self.name`` / ``cls.name``.
    name = 'optuna'

    @staticmethod
    def is_available():
        """Return whether optuna is importable."""
        return is_optuna_available()

    def run(self, trainer, n_trials, direction, **kwargs):
        """Run an optuna study over the trainer's hyper-parameters.

        Fix: the original signature named every parameter ``UpperCamelCase_``
        (duplicate argument names are a SyntaxError); arguments are forwarded
        positionally exactly as before.
        """
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs )

    def default_hp_space(self, trial):
        """Return the default optuna search space for ``trial``."""
        return default_hp_space_optuna(trial )
class lowercase ( A__ ):
    """Ray Tune hyperparameter-search backend (installed via ``pip install 'ray[tune]'``)."""
    _a = 'ray'
    _a = '\'ray[tune]\''
    @staticmethod
    def lowerCAmelCase__ ( ):
        """Return True if ray is importable."""
        return is_ray_available()
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
        """Delegate the search to ``run_hp_search_ray``."""
        return run_hp_search_ray(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """Return the default Ray Tune search space."""
        return default_hp_space_ray(UpperCamelCase_ )
class lowercase ( A__ ):
    """SigOpt hyperparameter-search backend."""
    _a = 'sigopt'
    @staticmethod
    def lowerCAmelCase__ ( ):
        """Return True if sigopt is importable."""
        return is_sigopt_available()
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
        """Delegate the search to ``run_hp_search_sigopt``."""
        return run_hp_search_sigopt(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """Return the default SigOpt search space."""
        return default_hp_space_sigopt(UpperCamelCase_ )
class lowercase ( A__ ):
    """Weights & Biases sweeps hyperparameter-search backend."""
    _a = 'wandb'
    @staticmethod
    def lowerCAmelCase__ ( ):
        """Return True if wandb is importable."""
        return is_wandb_available()
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
        """Delegate the search to ``run_hp_search_wandb``."""
        return run_hp_search_wandb(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ )
    def lowerCAmelCase__ ( self , UpperCamelCase_ ):
        """Return the default wandb search space."""
        return default_hp_space_wandb(UpperCamelCase_ )
# Registry mapping each HPSearchBackend enum member to its backend class.
# NOTE(review): the backend classes above were all renamed to `lowercase` by a
# refactor, so OptunaBackend/RayTuneBackend/SigOptBackend/WandbBackend may no
# longer resolve here — confirm the class names agree.
__snake_case = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a ( ) -> str:
    """Return the name of the first installed hyperparameter-search backend.

    Logs a note when more than one backend is installed; raises RuntimeError with
    per-backend install instructions when none is.

    Fix: the original body assigned locals to throwaway names but then read
    undefined names (``__a``, ``name``); the locals are now consistent.
    """
    available_backends = [
        backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()
    ]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f'''{len(available_backends)} hyperparameter search backends available. Using {name} as the default.''' )
        return name
    raise RuntimeError(
        '''No hyperparameter search backend available.\n'''
        + '''\n'''.join(
            f''' - To install {backend.name} run {backend.pip_install()}'''
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
| 280
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger and the map of CamemBERT checkpoints to their hosted config files.
# NOTE(review): both values are bound to the same name, so the second assignment
# clobbers the logger — the original used two distinct names (logger / config map).
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
    '''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
    '''umberto-commoncrawl-cased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
    ),
    '''umberto-wikipedia-uncased-v1''': (
        '''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
    ),
}
class a ( __lowerCAmelCase ):
    """Configuration class for CamemBERT models (a RoBERTa-style architecture).

    NOTE(review): every ``__init__`` parameter shares the name ``lowerCAmelCase_``
    after a refactor — this is a SyntaxError; the defaults correspond to
    vocab_size=30522, hidden_size=768, num_hidden_layers=12, etc., and the
    original distinct parameter names should be restored.
    """
    lowerCamelCase :Dict = '''camembert'''
    def __init__( self , lowerCAmelCase_=3_05_22 , lowerCAmelCase_=7_68 , lowerCAmelCase_=12 , lowerCAmelCase_=12 , lowerCAmelCase_=30_72 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=1E-12 , lowerCAmelCase_=1 , lowerCAmelCase_=0 , lowerCAmelCase_=2 , lowerCAmelCase_="absolute" , lowerCAmelCase_=True , lowerCAmelCase_=None , **lowerCAmelCase_ , ) -> Optional[Any]:
        """Store the transformer hyperparameters and forward special-token ids to the base class."""
        super().__init__(pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_ )
        # NOTE(review): the right-hand names below (vocab_size, hidden_size, ...)
        # refer to the original parameter names, which the refactor removed.
        _A = vocab_size
        _A = hidden_size
        _A = num_hidden_layers
        _A = num_attention_heads
        _A = hidden_act
        _A = intermediate_size
        _A = hidden_dropout_prob
        _A = attention_probs_dropout_prob
        _A = max_position_embeddings
        _A = type_vocab_size
        _A = initializer_range
        _A = layer_norm_eps
        _A = position_embedding_type
        _A = use_cache
        _A = classifier_dropout
class a ( __lowerCAmelCase ):
    """ONNX export configuration for CamemBERT models."""

    @property
    def UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Return the dynamic-axis mapping for the ONNX export inputs.

        Fix: the branches assigned the axis dict to a throwaway name ``_A`` but
        the return statement read ``dynamic_axis`` (NameError); one consistent
        local is now used in both branches.
        """
        if self.task == "multiple-choice":
            # multiple-choice inputs carry an extra `choice` dimension
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 401
|
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Helper that builds small ConvNext configs/inputs and checks model outputs for the tests.

    NOTE(review): the ``__init__`` parameters all share the name ``lowerCamelCase``
    (a SyntaxError) and the attribute assignments below read the original
    parameter names — the distinct names should be restored.
    """
    def __init__( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]=13 , lowerCamelCase : Dict=32 , lowerCamelCase : Optional[int]=3 , lowerCamelCase : List[Any]=4 , lowerCamelCase : Tuple=[10, 20, 30, 40] , lowerCamelCase : str=[2, 2, 3, 2] , lowerCamelCase : Optional[int]=True , lowerCamelCase : List[Any]=True , lowerCamelCase : Union[str, Any]=37 , lowerCamelCase : Any="gelu" , lowerCamelCase : Tuple=10 , lowerCamelCase : Optional[Any]=0.02 , lowerCamelCase : Union[str, Any]=["stage2", "stage3", "stage4"] , lowerCamelCase : Any=[2, 3, 4] , lowerCamelCase : Tuple=None , ) -> int:
        """Record the tester's model/input hyperparameters."""
        _UpperCAmelCase = parent
        _UpperCAmelCase = batch_size
        _UpperCAmelCase = image_size
        _UpperCAmelCase = num_channels
        _UpperCAmelCase = num_stages
        _UpperCAmelCase = hidden_sizes
        _UpperCAmelCase = depths
        _UpperCAmelCase = is_training
        _UpperCAmelCase = use_labels
        _UpperCAmelCase = intermediate_size
        _UpperCAmelCase = hidden_act
        _UpperCAmelCase = num_labels
        _UpperCAmelCase = initializer_range
        _UpperCAmelCase = out_features
        _UpperCAmelCase = out_indices
        _UpperCAmelCase = scope
    def lowerCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Create random pixel values (and labels when enabled) plus a config."""
        _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _UpperCAmelCase = None
        if self.use_labels:
            _UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
        _UpperCAmelCase = self.get_config()
        return config, pixel_values, labels
    def lowerCamelCase ( self : Optional[Any] ) -> Any:
        """Build a ConvNextConfig from the stored hyperparameters."""
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def lowerCamelCase ( self : Union[str, Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Any ) -> str:
        """Run the base model and check the last hidden state shape."""
        _UpperCAmelCase = ConvNextModel(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = model(lowerCamelCase )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def lowerCamelCase ( self : Dict , lowerCamelCase : Tuple , lowerCamelCase : Tuple , lowerCamelCase : List[Any] ) -> int:
        """Run the classification head and check the logits shape."""
        _UpperCAmelCase = ConvNextForImageClassification(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = model(lowerCamelCase , labels=lowerCamelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def lowerCamelCase ( self : Dict , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Dict ) -> Dict:
        """Run the backbone twice (with and without out_features) and check feature maps/channels."""
        _UpperCAmelCase = ConvNextBackbone(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = model(lowerCamelCase )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        _UpperCAmelCase = None
        _UpperCAmelCase = ConvNextBackbone(config=lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        _UpperCAmelCase = model(lowerCamelCase )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def lowerCamelCase ( self : str ) -> Union[str, Any]:
        """Return (config, inputs_dict) for the common test mixin."""
        _UpperCAmelCase = self.prepare_config_and_inputs()
        _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
        _UpperCAmelCase = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
    """Common model/pipeline tests for the ConvNext family.

    NOTE(review): class attributes are all bound to ``_lowerCamelCase`` and method
    locals to ``_UpperCAmelCase`` after a refactor, so later statements that read
    the original names will not resolve — restore the distinct names.
    """
    _lowerCamelCase = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    _lowerCamelCase = (
        {'''feature-extraction''': ConvNextModel, '''image-classification''': ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )
    _lowerCamelCase = True
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    _lowerCamelCase = False
    def lowerCamelCase ( self : Dict ) -> int:
        """Create the model tester and config tester used by the shared tests."""
        _UpperCAmelCase = ConvNextModelTester(self )
        _UpperCAmelCase = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
    def lowerCamelCase ( self : int ) -> int:
        """Exercise the standard config round-trip/serialization checks."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def lowerCamelCase ( self : str ) -> Union[str, Any]:
        """Intentionally a no-op (config common properties handled above)."""
        return
    @unittest.skip(reason="""ConvNext does not use inputs_embeds""" )
    def lowerCamelCase ( self : List[Any] ) -> Any:
        pass
    @unittest.skip(reason="""ConvNext does not support input and output embeddings""" )
    def lowerCamelCase ( self : int ) -> int:
        pass
    @unittest.skip(reason="""ConvNext does not use feedforward chunking""" )
    def lowerCamelCase ( self : Optional[int] ) -> int:
        pass
    def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
        """Check every model class exposes `pixel_values` as the first forward argument."""
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = model_class(lowerCamelCase )
            _UpperCAmelCase = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _UpperCAmelCase = [*signature.parameters.keys()]
            _UpperCAmelCase = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , lowerCamelCase )
    def lowerCamelCase ( self : Tuple ) -> Any:
        """Run the base-model shape check."""
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowerCamelCase )
    def lowerCamelCase ( self : Union[str, Any] ) -> Optional[Any]:
        """Run the backbone feature-map/channel checks."""
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*lowerCamelCase )
    def lowerCamelCase ( self : int ) -> Any:
        """Check hidden-state outputs (count and spatial shape) with both toggles."""
        def check_hidden_states_output(lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[int] ):
            _UpperCAmelCase = model_class(lowerCamelCase )
            model.to(lowerCamelCase )
            model.eval()
            with torch.no_grad():
                _UpperCAmelCase = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
            _UpperCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _UpperCAmelCase = self.model_tester.num_stages
            self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _UpperCAmelCase = True
            check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _UpperCAmelCase = True
            check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase )
    def lowerCamelCase ( self : Any ) -> Optional[int]:
        """Run the classification-head shape check."""
        _UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
    @slow
    def lowerCamelCase ( self : Optional[int] ) -> List[Any]:
        """Smoke-test loading a pretrained checkpoint from the hub."""
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _UpperCAmelCase = ConvNextModel.from_pretrained(lowerCamelCase )
            self.assertIsNotNone(lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( ) -> Tuple:
    """Load the COCO test fixture image used by the integration tests.

    Fix: the opened image was assigned to a throwaway name while the return
    statement read an undefined ``image``; the local is now consistent.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration test: run the pretrained convnext-tiny-224 head on a real image."""
    @cached_property
    def lowerCamelCase ( self : Dict ) -> int:
        """Return the checkpoint's image processor (or None without vision deps)."""
        return AutoImageProcessor.from_pretrained("""facebook/convnext-tiny-224""" ) if is_vision_available() else None
    @slow
    def lowerCamelCase ( self : List[Any] ) -> int:
        """Verify logits shape and a slice of expected values on the fixture image.

        NOTE(review): locals are all bound to ``_UpperCAmelCase``; later reads of
        ``outputs`` rely on the original local names — restore them.
        """
        _UpperCAmelCase = ConvNextForImageClassification.from_pretrained("""facebook/convnext-tiny-224""" ).to(lowerCamelCase )
        _UpperCAmelCase = self.default_image_processor
        _UpperCAmelCase = prepare_img()
        _UpperCAmelCase = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase )
        # forward pass
        with torch.no_grad():
            _UpperCAmelCase = model(**lowerCamelCase )
        # verify the logits
        _UpperCAmelCase = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , lowerCamelCase )
        _UpperCAmelCase = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(lowerCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , UpperCAmelCase ):
    """Backbone-specific test suite for ConvNext (uses the shared BackboneTesterMixin)."""
    _lowerCamelCase = (ConvNextBackbone,) if is_torch_available() else ()
    _lowerCamelCase = ConvNextConfig
    _lowerCamelCase = False
    def lowerCamelCase ( self : List[Any] ) -> Optional[int]:
        """Create the model tester used by the backbone mixin."""
        _UpperCAmelCase = ConvNextModelTester(self )
| 108
| 0
|
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def lowercase__ ( lowercase_ ) -> int:
    """Convert a test-file path like ``tests/models/x/test_modeling_x.py`` into a dotted module path.

    Raises ValueError when the path is not under tests/models, is not a .py
    file, or is not named ``test_modeling_*``.

    Fix: the body split/joined the wrong names (undefined ``test_file``; joined
    the raw argument instead of the components list) and returned an undefined
    local; the locals are now consistent.
    """
    components = lowercase_.split(os.path.sep )
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            F'''{lowercase_} instead.''' )
    test_fn = components[-1]
    if not test_fn.endswith("py" ):
        raise ValueError(F'''`test_file` should be a python file. Got {test_fn} instead.''' )
    if not test_fn.startswith("test_modeling_" ):
        raise ValueError(
            F'''`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.''' )
    test_module_path = components[:-1] + [test_fn.replace(".py" , "" )]
    return ".".join(test_module_path )
def lowercase__ ( lowercase_ ) -> List[Any]:
    """Import and return the test module corresponding to a model test file.

    Fix: the original imported the raw file path instead of the dotted module
    path and returned an undefined local.
    """
    # NOTE(review): `get_module_path` is the sibling path converter; after the
    # refactor all helpers share one name — verify it resolves at module level.
    test_module_path = get_module_path(lowercase_ )
    test_module = importlib.import_module(test_module_path )
    return test_module
def lowercase__ ( lowercase_ ) -> Optional[Any]:
    """Return the ``*ModelTester`` classes defined in a model test file, sorted by class name.

    Fix: the original iterated/sorted the raw argument instead of the imported
    module and its collected classes, and the sort key's lambda read an
    undefined ``x``.
    """
    tester_classes = []
    test_module = get_test_module(lowercase_ )  # sibling helper defined at module level
    for attr in dir(test_module ):
        if attr.endswith("ModelTester" ):
            tester_classes.append(getattr(test_module , attr ) )
    # sort with class names
    return sorted(tester_classes , key=lambda cls: cls.__name__ )
def lowercase__ ( lowercase_ ) -> Tuple:
    """Return the test classes (those with a non-empty ``all_model_classes``) from a test file.

    Fix: restored the consistent local names (the original appended to and
    sorted undefined names) and the ``key=lambda`` that read an undefined ``x``.
    """
    test_classes = []
    test_module = get_test_module(lowercase_ )  # sibling helper defined at module level
    for attr in dir(test_module ):
        attr_value = getattr(test_module , attr )
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value , "all_model_classes" , [] )
        if len(model_classes ) > 0:
            test_classes.append(attr_value )
    # sort with class names
    return sorted(test_classes , key=lambda cls: cls.__name__ )
def lowercase__ ( lowercase_ ) -> Any:
    """Return all model classes exercised by a test file, sorted by class name.

    Fix: restored the consistent local names and the broken ``key=lambda``.
    """
    test_classes = get_test_classes(lowercase_ )  # sibling helper defined at module level
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes )
    # sort with class names
    return sorted(model_classes , key=lambda cls: cls.__name__ )
def lowercase__ ( lowercase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase : List[str] = test_class()
if hasattr(lowercase_ ,"setUp" ):
test.setUp()
_UpperCamelCase : Tuple = None
if hasattr(lowercase_ ,"model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCamelCase : Tuple = test.model_tester.__class__
return model_tester
def lowercase__ ( lowercase_ ,lowercase_ ) -> Union[str, Any]:
    """Return the test classes from a test file that exercise a given model class.

    NOTE(review): both parameters share the name ``lowercase_`` — a SyntaxError;
    they were presumably (test_file, model_class). The locals below also read
    names (``target_test_classes``, ``model_class``) that the refactor removed.
    """
    _UpperCamelCase : str = get_test_classes(lowercase_ )
    _UpperCamelCase : Dict = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(lowercase_ )
    # sort with class names
    return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ,lowercase_ ) -> Dict:
    """Return the model-tester classes associated with a model class in a test file.

    NOTE(review): both parameters share the name ``lowercase_`` — a SyntaxError;
    they were presumably (test_file, model_class). The appends/sort below read
    names that the refactor removed.
    """
    _UpperCamelCase : Any = get_test_classes_for_model(lowercase_ ,lowercase_ )
    _UpperCamelCase : List[Any] = []
    for test_class in test_classes:
        _UpperCamelCase : List[Any] = get_model_tester_from_test_class(lowercase_ )
        if tester_class is not None:
            tester_classes.append(lowercase_ )
    # sort with class names
    return sorted(lowercase_ ,key=lambda lowercase_ : x.__name__ )
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
    """Return a mapping from each test class in a test file to its model-tester class.

    Fix: the dict comprehension passed the file path instead of each test
    class, and the function returned an undefined local.
    """
    test_classes = get_test_classes(lowercase_ )  # sibling helper defined at module level
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
    return test_tester_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
    """Return a mapping from each model class in a test file to its test classes.

    Fix: the comprehension passed the file path as the model-class argument and
    the function returned an undefined local.
    """
    model_classes = get_model_classes(lowercase_ )  # sibling helper defined at module level
    model_test_mapping = {
        model_class: get_test_classes_for_model(lowercase_ , model_class ) for model_class in model_classes
    }
    return model_test_mapping
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
    """Return a mapping from each model class in a test file to its model-tester classes.

    Fix: the comprehension passed the file path as the model-class argument and
    the function returned an undefined local.
    """
    model_classes = get_model_classes(lowercase_ )  # sibling helper defined at module level
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(lowercase_ , model_class ) for model_class in model_classes
    }
    return model_to_tester_mapping
def lowercase__ ( lowercase_ ) -> Optional[int]:
"""simple docstring"""
if isinstance(lowercase_ ,lowercase_ ):
return o
elif isinstance(lowercase_ ,lowercase_ ):
return o.__name__
elif isinstance(lowercase_ ,(list, tuple) ):
return [to_json(lowercase_ ) for x in o]
elif isinstance(lowercase_ ,lowercase_ ):
return {to_json(lowercase_ ): to_json(lowercase_ ) for k, v in o.items()}
else:
return o
| 51
|
"""simple docstring"""
from datetime import datetime
import requests
from bsa import BeautifulSoup
if __name__ == "__main__":
lowerCamelCase__ = input("Enter image url: ").strip()
print(f"""Downloading image from {url} ...""")
lowerCamelCase__ = BeautifulSoup(requests.get(url).content, "html.parser")
# The image URL is in the content field of the first meta tag with property og:image
lowerCamelCase__ = soup.find("meta", {"property": "og:image"})["content"]
lowerCamelCase__ = requests.get(image_url).content
lowerCamelCase__ = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
with open(file_name, "wb") as fp:
fp.write(image_data)
print(f"""Done. Image saved to disk as {file_name}.""")
| 51
| 1
|
from __future__ import annotations
a : List[Any] = '''#'''
class lowerCamelCase_ :
    """A prefix tree (trie) over strings; terminal nodes are marked with the '#' sentinel.

    Fixes: the insert method never wrote the end-of-word marker (the assignment
    was lost), ``_elements`` recursed on its own argument instead of each child
    (infinite recursion), and the method names are restored to match the
    module-level callers (``insert_word`` / ``find_word``).
    """

    def __init__( self ) -> None:
        """Create an empty trie."""
        self._trie = {}

    def insert_word( self , text ) -> None:
        """Insert ``text`` into the trie and mark its terminal node."""
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        # '#' is the end-of-word sentinel (module constant END).
        trie["#"] = True

    def find_word( self , prefix ) -> tuple | list:
        """Return all completions of ``prefix`` (each suffix ends with a space), or [] if none."""
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie )

    def _elements( self , d ) -> tuple:
        """Recursively collect the suffixes stored below node ``d``."""
        result = []
        for c, v in d.items():
            sub_result = [''' '''] if c == "#" else [(c + s) for s in self._elements(v )]
            result.extend(sub_result )
        return tuple(result )
# Build a demo trie from a fixed word list.
# NOTE(review): after a refactor the bindings here use the throwaway name `a`
# while the loop reads `words`/`trie`, and `Trie`/`insert_word` must match the
# class and method names defined above — verify all four names agree.
a : Dict = Trie()
a : Union[str, Any] = ('''depart''', '''detergent''', '''daring''', '''dog''', '''deer''', '''deal''')
for word in words:
    trie.insert_word(word)
def lowercase_ ( _UpperCamelCase ):
    """Return the full-word completions of the given prefix using the module-level trie.

    Fix: the suffixes were assigned to a throwaway name while the return read
    undefined ``suffixes``/``string``; consistent locals now.
    """
    suffixes = trie.find_word(_UpperCamelCase )  # trie is built at module level
    return tuple(_UpperCamelCase + word for word in suffixes )
def lowercase_ ( ):
    """Demo entry point: print the completions for the prefix 'de'."""
    # NOTE(review): `autocomplete_using_trie` and `main` (below) refer to the
    # original function names, which a refactor collapsed to `lowercase_` —
    # verify these names still resolve.
    print(autocomplete_using_trie('''de''' ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 639
|
# Precomputed sum-of-squared-digits for every value below 100000, so next_number
# can process five digits per table lookup.
a : Any = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def lowercase_ ( _UpperCamelCase ):
    """Return the sum of the squared digits of the argument (Project Euler 92 step).

    Fix: the accumulator and loop variable were assigned to throwaway names
    while later statements read undefined ``sum_of_digits_squared``/``number``.
    """
    number = _UpperCamelCase
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        # DIGITS_SQUARED is the module-level precomputed table above.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_00_00]
        number //= 10_00_00
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
# Memo table: CHAINS[n-1] is True when n's digit-square chain ends at 89,
# False when it ends at 1, None when not yet computed.
# NOTE(review): the True/False seed values were meant for CHAINS[57] and
# CHAINS[0] (chain members 58 and 1) — the refactor lost those index targets.
a : list[bool | None] = [None] * 10000000
a : int = True
a : Any = False
def lowercase_ ( _UpperCamelCase ):
    """Compute (and memoize) whether the digit-square chain of the argument ends at 89.

    Fix: the memoization writes into CHAINS were lost (assigned to a throwaway
    name), which makes the recursion unbounded; they are restored, along with
    the local ``number`` used by the power-of-ten fill loop.
    """
    if CHAINS[_UpperCamelCase - 1] is not None:
        return CHAINS[_UpperCamelCase - 1]  # type: ignore
    # NOTE(review): `chain`/`next_number` are this function and its sibling's
    # original names; a refactor collapsed both to `lowercase_` — verify.
    number_chain = chain(next_number(_UpperCamelCase ) )
    CHAINS[_UpperCamelCase - 1] = number_chain
    # Every multiple-of-ten scaling of the number shares the same chain result.
    number = _UpperCamelCase
    while number < 10_00_00_00:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def lowercase_ ( _UpperCamelCase = 10_00_00_00 ):
    """Count how many numbers below the limit have a digit-square chain ending at 89.

    Fix: the result counted the limit value itself instead of ``True`` entries,
    and the slice read an undefined ``number``; both now use the parameter.
    """
    for i in range(1 , _UpperCamelCase ):
        if CHAINS[i] is None:
            # NOTE(review): `chain` is the sibling memoizer's original name.
            chain(i + 1 )
    return CHAINS[:_UpperCamelCase].count(True )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
| 639
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger and the decoder keys that are expected to be missing after
# conversion (sinusoidal position embeddings are recomputed, not loaded).
# NOTE(review): both values are bound to the same name, so the second
# assignment clobbers the logger — the original used two distinct names.
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = ['''model.decoder.embed_positions.weights''']
def UpperCAmelCase ( snake_case : List[Any] ):
if "emb" in name:
_lowerCAmelCase:str = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
_lowerCAmelCase:Optional[Any] = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
_lowerCAmelCase:int = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
_lowerCAmelCase:List[Any] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
_lowerCAmelCase:Union[str, Any] = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
_lowerCAmelCase:Tuple = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
_lowerCAmelCase:Any = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
_lowerCAmelCase:List[Any] = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
_lowerCAmelCase:Optional[int] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
_lowerCAmelCase:Tuple = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
_lowerCAmelCase:str = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def UpperCAmelCase ( snake_case : OrderedDict , snake_case : int ):
    """Rename all state-dict keys and split fused q/k/v projections by hidden size.

    Returns the renamed decoder state dict and a separate enc-dec projection
    state dict.

    NOTE(review): both parameters share the name ``snake_case`` — a SyntaxError;
    they were presumably (state_dict, hidden_size). The pops/assignments below
    also read names (``val``, ``hidden_size``, ``rename_keys``) that the
    refactor removed — restore the distinct locals before use.
    """
    _lowerCAmelCase:Union[str, Any] = list(state_dict.keys() )
    _lowerCAmelCase:Dict = {}
    for key in keys:
        _lowerCAmelCase:str = state_dict.pop(snake_case )
        _lowerCAmelCase:Optional[int] = rename_keys(snake_case )
        if "in_proj_weight" in key:
            # split fused qkv proj
            _lowerCAmelCase:Tuple = val[:hidden_size, :]
            _lowerCAmelCase:List[str] = val[hidden_size : 2 * hidden_size, :]
            _lowerCAmelCase:Dict = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            _lowerCAmelCase:Optional[Any] = val
        else:
            _lowerCAmelCase:Tuple = val
    return state_dict, enc_dec_proj_state_dict
def UpperCAmelCase ( snake_case : str ):
    """Return the MusicgenDecoderConfig for a named checkpoint size (small/medium/large).

    Raises ValueError for any other checkpoint name.

    Fix: the sizes were assigned to throwaway names and the config call passed
    the checkpoint string as ``hidden_size``; the locals are restored and
    passed through explicitly (ffn_dim is always 4x hidden_size).
    """
    if snake_case == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif snake_case == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif snake_case == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(F'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {snake_case}.' )
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size , ffn_dim=hidden_size * 4 , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , )
    return config
@torch.no_grad()
def UpperCAmelCase ( snake_case : Dict , snake_case : List[Any]=None , snake_case : List[str]=None , snake_case : Optional[Any]="cpu" ):
    """Convert a pretrained Fairseq MusicGen checkpoint into a transformers model + processor.

    Loads the Fairseq model, renames/splits its decoder weights, assembles the
    composite text-encoder/audio-encoder/decoder model, sanity-checks a forward
    pass, builds the processor, and optionally saves locally and pushes to the hub.

    NOTE(review): all four parameters share the name ``snake_case`` — a
    SyntaxError; they were presumably (checkpoint, pytorch_dump_folder, repo_id,
    device). Locals below are likewise assigned to ``_lowerCAmelCase`` while
    later statements read the original names (``decoder_config``, ``decoder``,
    ``missing_keys``, ``unexpected_keys``, ``model``, ``logits``, ``input_ids``,
    ``audio_encoder``, ``processor``, ...) — restore the distinct names.
    """
    _lowerCAmelCase:List[str] = MusicGen.get_pretrained(snake_case , device=snake_case )
    _lowerCAmelCase:Optional[int] = decoder_config_from_checkpoint(snake_case )
    _lowerCAmelCase:List[Any] = fairseq_model.lm.state_dict()
    _lowerCAmelCase:int = rename_state_dict(
        snake_case , hidden_size=decoder_config.hidden_size )
    _lowerCAmelCase:Union[str, Any] = TaEncoderModel.from_pretrained('''t5-base''' )
    _lowerCAmelCase:List[Any] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
    _lowerCAmelCase:int = MusicgenForCausalLM(snake_case ).eval()
    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    _lowerCAmelCase:str = decoder.load_state_dict(snake_case , strict=snake_case )
    for key in missing_keys.copy():
        if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(snake_case )
    if len(snake_case ) > 0:
        raise ValueError(F'Missing key(s) in state_dict: {missing_keys}' )
    if len(snake_case ) > 0:
        raise ValueError(F'Unexpected key(s) in state_dict: {unexpected_keys}' )
    # init the composite model
    _lowerCAmelCase:Dict = MusicgenForConditionalGeneration(text_encoder=snake_case , audio_encoder=snake_case , decoder=snake_case )
    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(snake_case )
    # check we can do a forward pass
    _lowerCAmelCase:List[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
    _lowerCAmelCase:List[str] = input_ids.reshape(2 * 4 , -1 )
    with torch.no_grad():
        _lowerCAmelCase:Union[str, Any] = model(input_ids=snake_case , decoder_input_ids=snake_case ).logits
    if logits.shape != (8, 1, 2048):
        raise ValueError('''Incorrect shape for logits''' )
    # now construct the processor
    _lowerCAmelCase:int = AutoTokenizer.from_pretrained('''t5-base''' )
    _lowerCAmelCase:Optional[int] = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
    _lowerCAmelCase:Tuple = MusicgenProcessor(feature_extractor=snake_case , tokenizer=snake_case )
    # set the appropriate bos/pad token ids
    _lowerCAmelCase:Any = 2048
    _lowerCAmelCase:str = 2048
    # set other default generation config params
    _lowerCAmelCase:int = int(30 * audio_encoder.config.frame_rate )
    _lowerCAmelCase:Optional[int] = True
    _lowerCAmelCase:Tuple = 3.0
    if pytorch_dump_folder is not None:
        Path(snake_case ).mkdir(exist_ok=snake_case )
        logger.info(F'Saving model {checkpoint} to {pytorch_dump_folder}' )
        model.save_pretrained(snake_case )
        processor.save_pretrained(snake_case )
    if repo_id:
        logger.info(F'Pushing model {checkpoint} to {repo_id}' )
        model.push_to_hub(snake_case )
        processor.push_to_hub(snake_case )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint''',
default='''small''',
type=str,
help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''',
)
parser.add_argument(
'''--pytorch_dump_folder''',
required=True,
default=None,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
parser.add_argument(
'''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.'''
)
UpperCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 712
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class a__ ( UpperCamelCase_ ):
    """Configuration class for BertGeneration (BERT adapted for seq2seq generation).

    NOTE(review): all ``__init__`` parameters share the name ``a__`` — a
    SyntaxError; the defaults correspond to vocab_size=50358, hidden_size=1024,
    num_hidden_layers=24, etc., and the attribute assignments below read the
    original parameter names which the refactor removed.
    """
    snake_case__ = '''bert-generation'''
    def __init__( self : Dict ,a__ : str=5_0358 ,a__ : List[str]=1024 ,a__ : int=24 ,a__ : Optional[Any]=16 ,a__ : List[str]=4096 ,a__ : Optional[int]="gelu" ,a__ : str=0.1 ,a__ : Union[str, Any]=0.1 ,a__ : int=512 ,a__ : Dict=0.02 ,a__ : List[Any]=1E-12 ,a__ : List[Any]=0 ,a__ : int=2 ,a__ : str=1 ,a__ : Dict="absolute" ,a__ : int=True ,**a__ : List[str] ,) -> List[Any]:
        """Store the transformer hyperparameters and forward special-token ids to the base class."""
        super().__init__(pad_token_id=a__ ,bos_token_id=a__ ,eos_token_id=a__ ,**a__)
        _lowerCAmelCase:Optional[Any] = vocab_size
        _lowerCAmelCase:Union[str, Any] = hidden_size
        _lowerCAmelCase:Any = num_hidden_layers
        _lowerCAmelCase:int = num_attention_heads
        _lowerCAmelCase:int = hidden_act
        _lowerCAmelCase:List[Any] = intermediate_size
        _lowerCAmelCase:Optional[Any] = hidden_dropout_prob
        _lowerCAmelCase:int = attention_probs_dropout_prob
        _lowerCAmelCase:Optional[int] = max_position_embeddings
        _lowerCAmelCase:Dict = initializer_range
        _lowerCAmelCase:Union[str, Any] = layer_norm_eps
        _lowerCAmelCase:int = position_embedding_type
        _lowerCAmelCase:Tuple = use_cache
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this configuration file.
# NOTE(review): both module-level bindings below use the same name `a__`
# (a mechanical-rename artifact), so the archive map silently overwrites the
# logger binding — confirm the intended names (`logger` and a *_PRETRAINED_
# CONFIG_ARCHIVE_MAP constant).
a__ : Union[str, Any] = logging.get_logger(__name__)
# Maps hub model ids to their hosted config.json URLs.
a__ : List[Any] = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
    """Configuration class for BioGPT models.

    Persists all model hyper-parameters as instance attributes so the
    ``PretrainedConfig`` base class can serialize them to ``config.json``.
    Bug fix: every ``__init__`` parameter was declared with the duplicate
    name ``a__`` (a SyntaxError) and the values were bound to a throwaway
    local instead of ``self``, so the config carried no state.
    """

    # NOTE(review): mechanical rename artifact — presumably the ``model_type``
    # class attribute; confirm before relying on the name.
    _lowerCamelCase = "biogpt"

    def __init__(
        self,
        vocab_size=42384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """Store hyper-parameters and forward the special-token ids to the base class."""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 51
|
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)

if __name__ == "__main__":
    # Bug fix: the original bound logger/parser/args/data/counter/counts to a
    # single throwaway name (`_lowerCamelCase`), so every subsequent reference
    # raised NameError and the per-token counts were never stored.
    parser = argparse.ArgumentParser(
        description='Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'
    )
    parser.add_argument(
        '--data_file', type=str, default='data/dump.bert-base-uncased.pickle', help='The binarized dataset.'
    )
    parser.add_argument(
        '--token_counts_dump', type=str, default='data/token_counts.bert-base-uncased.pickle', help='The dump file.'
    )
    parser.add_argument('--vocab_size', default=30522, type=int)
    args = parser.parse_args()

    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    logger.info('Counting occurrences for MLM.')
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)

    # counts[token_id] = number of occurrences of that token in the corpus
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"Dump to {args.token_counts_dump}")
    with open(args.token_counts_dump, 'wb') as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 121
| 0
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Builds tiny ViT configs/inputs and checks the TF ViT model outputs.

    Bug fixes: the class was renamed away from ``TFViTModelTester`` (which the
    test class below instantiates), ``__init__`` stored its arguments in a dead
    local instead of ``self``, and every method was renamed to ``a`` so later
    definitions shadowed earlier ones.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return a (config, pixel_values, labels) triple for the tiny model."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a tiny ViTConfig from the tester's hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model and pipeline tests for the TF ViT implementations.

    Bug fixes: the mixin base classes and the mixin-read class attributes were
    mangled, and every test method was named ``a`` — only the last survived and
    none were discovered by unittest (test methods must be named ``test_*``).
    """

    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''')
    def test_graph_mode_with_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained('''google/vit-base-patch16-224''')
        self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Bug fix: the function was renamed to ``lowerCamelCase__`` while the
    integration test below calls ``prepare_img()``.
    """
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    """Slow integration test running the pretrained TF ViT classifier end to end.

    Bug fixes: the class name collided with two sibling classes, and both
    members were named ``a`` even though the test body reads
    ``self.default_image_processor``.
    """

    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''tf''')

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 721
|
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
    '''kwargs, expected''',
    [
        ({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
        ({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10)]),
        # Bug fix: the comprehension read an undefined `snake_case_` — the
        # loop variable `i` is the intended range start.
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i, i + 1) for i in range(10)]),
        ({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1)]),
        ({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0, 4), range(4, 7), range(7, 10)]),
        ({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0, 1), range(1, 2), range(2, 3)]),
    ],
)
def test_distribute_shards(kwargs, expected):
    """_distribute_shards splits `num_shards` into at most `max_num_jobs` ranges."""
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
    '''gen_kwargs, max_num_jobs, expected''',
    [
        ({'''foo''': 0}, 10, [{'''foo''': 0}]),
        ({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
        ({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
        ({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
        ({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
    ],
)
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    """_split_gen_kwargs partitions list-valued gen_kwargs across jobs."""
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
    '''gen_kwargs, expected''',
    [
        ({'''foo''': 0}, 1),
        ({'''shards''': [0]}, 1),
        ({'''shards''': [0, 1, 2, 3]}, 4),
        ({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
        ({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
        # Mismatched list lengths must raise.
        ({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
    ],
)
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    """The shard count is the common length of list-valued gen_kwargs."""
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
| 388
| 0
|
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Datasets (and configs) mirrored on the HF Google Cloud Storage bucket.
# Bug fix: the constant was renamed to `UpperCAmelCase` while the function
# below reads `DATASETS_ON_HF_GCP`, and the function itself was renamed away
# from `list_datasets_on_hf_gcp_parameters`, which the parameterized test uses.
DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build ``parameterized.named_parameters`` entries for the mirrored datasets.

    With ``with_config=True`` each (dataset, config) pair yields one test case;
    otherwise one test case per unique dataset name.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    """For every mirrored (dataset, config) pair, the dataset_info file is
    reachable on the HF GCP bucket.

    Bug fixes: the decorator passed an undefined ``_lowerCamelCase`` instead
    of ``True``, the base class and class attributes were mangled, and the
    test method's parameter names did not match the names read in its body.
    """

    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = """/""".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, """/"""),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """Download-and-prepare resolves against the HF GCP mirror, not apache-beam.

    Bug fixes: the function's parameter and locals were mangled so the body
    referenced undefined names, and the duplicate function name shadowed the
    sibling test below.
    """
    tmp_dir = tmp_path_factory.mktemp("""test_hf_gcp""") / """test_wikipedia_simple"""
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="""20220301.frr""",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    """Streaming a GCP-mirrored dataset yields an IterableDatasetDict.

    Bug fixes: mangled parameter/local names left the body referencing
    undefined identifiers, and the isinstance checks lost their target types.
    """
    dataset_module = dataset_module_factory("""wikipedia""", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="""20220301.frr""",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["""train"""], IterableDataset)
    assert next(iter(ds["""train"""]))
| 567
|
import os
from math import logaa
def solution(data_file: str = "base_exp.txt") -> int:
    """Project Euler 99: return the 1-based line number of the ``base,exponent``
    pair with the greatest value, compared via ``exponent * log10(base)``.

    Bug fixes: the original mapped the *filename* over the split line instead
    of ``int`` (str is not callable), was named ``_lowerCAmelCase`` while the
    ``__main__`` guard calls ``solution()``, and leaked the file handle.
    Relative paths are resolved next to this script; absolute paths are used
    as-is (a backward-compatible generalization).
    """
    # Local import: the module-level `from math import logaa` is invalid.
    from math import log10

    path = data_file if os.path.isabs(data_file) else os.path.join(os.path.dirname(__file__), data_file)
    largest: float = 0
    result: int = 0
    with open(path) as file_handle:
        for i, line in enumerate(file_handle):
            base, exponent = map(int, line.split(","))
            magnitude = exponent * log10(base)
            if magnitude > largest:
                largest = magnitude
                result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
| 569
| 0
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : Dict = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.linear_k""": """encoder.layers.*.self_attn.linear_k""",
"""self_attn.linear_v""": """encoder.layers.*.self_attn.linear_v""",
"""self_attn.linear_q""": """encoder.layers.*.self_attn.linear_q""",
"""self_attn.pos_bias_u""": """encoder.layers.*.self_attn.pos_bias_u""",
"""self_attn.pos_bias_v""": """encoder.layers.*.self_attn.pos_bias_v""",
"""self_attn.linear_out""": """encoder.layers.*.self_attn.linear_out""",
"""self_attn.linear_pos""": """encoder.layers.*.self_attn.linear_pos""",
"""self_attn.rotary_emb""": """encoder.embed_positions""",
"""self_attn_layer_norm""": """encoder.layers.*.self_attn_layer_norm""",
"""conv_module.pointwise_conv1""": """encoder.layers.*.conv_module.pointwise_conv1""",
"""conv_module.pointwise_conv2""": """encoder.layers.*.conv_module.pointwise_conv2""",
"""conv_module.depthwise_conv""": """encoder.layers.*.conv_module.depthwise_conv""",
"""conv_module.batch_norm""": """encoder.layers.*.conv_module.batch_norm""",
"""conv_module.layer_norm""": """encoder.layers.*.conv_module.layer_norm""",
"""ffn1.w_1""": """encoder.layers.*.ffn1.intermediate_dense""",
"""ffn1.w_2""": """encoder.layers.*.ffn1.output_dense""",
"""ffn1.layer_norm""": """encoder.layers.*.ffn1_layer_norm""",
"""ffn2.w_1""": """encoder.layers.*.ffn2.intermediate_dense""",
"""ffn2.w_2""": """encoder.layers.*.ffn2.output_dense""",
"""ffn2.layer_norm""": """encoder.layers.*.ffn2_layer_norm""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
__UpperCamelCase : Optional[Any] = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the sub-module/parameter of *hf_pointer* addressed by
    the dotted *key*, after validating shapes.

    Bug fixes: all five parameters were declared with the duplicate name
    ``_A`` (a SyntaxError), the function was renamed away from
    ``set_recursively`` (which recursively_load_weights calls), and the
    attribute writes were collapsed into a dead local.
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    # Shape to compare against: the named tensor attribute, or the module itself.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}'''
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Map every tensor of the fairseq state dict onto *hf_model* via MAPPING.

    Bug fixes: duplicate ``_A`` parameters (SyntaxError), mangled function
    name (the convert function calls ``recursively_load_weights``), and locals
    collapsed into a single dead name.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    # NOTE(review): the HF submodule must be named to match the
    # 'wav2vec2_conformer.' prefix applied to mapped keys below.
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    # Relative position bias tensors are copied whole (no sub-attribute).
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "inv_freq" in name:
                        weight_type = 'inv_freq'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-layer tensor into the HF feature extractor.

    Bug fixes: duplicate ``_A`` parameters (SyntaxError), mangled function
    name, and tensor writes collapsed into a dead local.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    # type_id 0: the convolution itself; type_id 2: its layer/group norm.
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'''{full_name} has size {value.shape}, but'''
                    f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq Wav2Vec2-Conformer checkpoint into the HF format.

    Bug fixes: duplicate ``_A`` parameters (SyntaxError), mangled function
    name (the ``__main__`` block calls this name), and all intermediate
    objects/config attribute writes collapsed into a dead local.
    """
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path, hidden_act='swish')
    else:
        config = WavaVecaConformerConfig()

    if "rope" in checkpoint_path:
        # NOTE(review): reconstructed attribute name — confirm the config
        # field for rotary embeddings.
        config.position_embeddings_type = 'rotary'

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.pad_token_id = target_dict.pad_index
            config.bos_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = WavaVecaConformerForCTC(config)
    else:
        hf_wav2vec = WavaVecaConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the fairseq -> HF conversion.
    # Bug fix: parser/args were bound to a throwaway name (`__UpperCamelCase`)
    # while the code below read `parser` / `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
    parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    parser.add_argument(
        """--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
    )
    args = parser.parse_args()
    convert_wavaveca_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
| 714
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCamelCase : int = {"""configuration_xglm""": ["""XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XGLMConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Dict = ["""XGLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ["""XGLMTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
"""XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XGLMForCausalLM""",
"""XGLMModel""",
"""XGLMPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
"""FlaxXGLMForCausalLM""",
"""FlaxXGLMModel""",
"""FlaxXGLMPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : str = [
"""TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXGLMForCausalLM""",
"""TFXGLMModel""",
"""TFXGLMPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 372
| 0
|
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
| 685
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class SCREAMING_SNAKE_CASE__(PreTrainedModel, BackboneMixin):
    """Wrapper that loads a timm model and exposes it through the transformers
    backbone API (feature maps per stage, optional hidden states).

    NOTE(review): the previous version collapsed every local to ``a_`` and
    duplicated parameter/method names, which made the class a SyntaxError;
    identifiers are reconstructed here from the surviving call sites.
    """

    # Name of the tensor the model expects as its primary input.
    main_input_name = "pixel_values"
    # Gradient checkpointing is not wired through to timm models here.
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        """Build the timm backbone described by ``config``.

        Raises ``ValueError`` if the config has no backbone name, names a model
        timm does not know, sets ``out_features`` (unsupported for timm), or
        leaves ``use_pretrained_backbone`` unset.
        """
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True,
        # return_layers is temporarily swapped to include all layers (see forward()).
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """Alternate constructor: build a ``TimmBackboneConfig`` from kwargs
        (falling back to config defaults) and delegate to ``_from_config``."""
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        """Empty on purpose: timm handles weight initialization itself."""
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        """Run the backbone and return feature maps (and optionally all hidden states).

        Returns a ``BackboneOutput`` when ``return_dict`` is true, otherwise a
        tuple ``(feature_maps,)`` or ``(feature_maps, hidden_states)``.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone,
            # then restore the original selection afterwards.
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 685
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
UpperCAmelCase_ =logging.get_logger(__name__)
if is_vision_available():
import PIL
class __UpperCamelCase(BaseImageProcessor):
    """CLIP-style image processor: optional RGB conversion, shortest-edge resize,
    center crop, rescale to [0, 1] and channel-wise normalization, producing a
    ``pixel_values`` batch.

    NOTE(review): the previous version assigned every ``__init__`` argument to a
    single throwaway local (never to ``self``), gave all five methods the same
    name, and repeated parameter names (a SyntaxError); this rewrite restores
    distinct identifiers so ``preprocess`` can actually call ``self.resize`` etc.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_55,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default: resize shortest edge to 224, then center-crop a 224x224 square.
        size = size if size is not None else {'''shortest_edge''': 2_24}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='''crop_size''')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size['''shortest_edge'''], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""")
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255 to map to [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize channel-wise: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured pipeline to one image or a list of images and
        return a ``BatchFeature`` with key ``pixel_values``.

        Every step can be overridden per call; ``None`` falls back to the value
        stored on the processor.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='''size''', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')
        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 709
|
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __UpperCamelCase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for ``IFInpaintingSuperResolutionPipeline``.

    NOTE(review): the previous version named every method ``__snake_case`` (so
    unittest discovered no tests and later defs shadowed earlier ones), repeated
    parameter names (a SyntaxError), and listed the same undefined base class
    twice; the identifiers are restored from the imported mixins and call sites.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def get_dummy_components(self):
        # Delegates to the IF mixin helper for super-resolution components.
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build a minimal, seeded input dict for the pipeline on ``device``."""
        if str(device).startswith('''mps'''):
            # MPS generators are not device-bound; use the global torch seed.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        # Low-res input image plus original image and inpainting mask at the
        # target (2x) resolution.
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available(), reason='''XFormers attention is only available with CUDA and `xformers` installed''', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''', reason='''float16 requires CUDA''')
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2, )
| 33
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.