| code (string, lengths 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, lengths 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
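# A short note (added; describes the standard transformers lazy-import pattern shown
# above): nothing listed in `_import_structure` is imported eagerly. At runtime the
# module object is replaced by `_LazyModule`, so an attribute access such as
#
#     from transformers.models.clipseg import CLIPSegModel
#
# is what actually triggers loading of `modeling_clipseg` (and its torch dependency).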
*code_codestyle: 14*
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Ideal gas law: p = nRT / V."""
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Ideal gas law: V = nRT / p."""
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure


if __name__ == "__main__":
    from doctest import testmod

    testmod()
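# Usage sketch (added; follows the names restored above):
#     >>> round(pressure_of_gas_system(1.0, 300.0, 0.025), 1)  # n*R*T / V, in Pa
#     99773.5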
*style_context_codestyle: 518, label: 0*
import warnings
from typing import Dict, List, Optional, Tuple

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """Construct a ByT5 tokenizer, which operates directly on raw UTF-8 bytes."""

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self) -> int:
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens), one per byte."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        # ByT5 has no vocabulary file to save.
        return ()
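# Usage sketch (added; assumes the `transformers` runtime is available). ByT5 has no
# vocabulary file: every UTF-8 byte is a token, offset past the 3 special ids.
#     >>> tok = ByT5Tokenizer()
#     >>> tok._tokenize("hi")
#     ['h', 'i']
#     >>> tok._convert_token_to_id("h")  # ord("h") + 3 special tokens
#     107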
*code_codestyle: 712*
# Logistic Regression from scratch

# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target matrix
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
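# Sanity check (added): a score of 0 maps to probability 0.5, which is exactly the
# decision boundary drawn by the contour plot above.
#     >>> float(sigmoid_function(np.array(0.0)))
#     0.5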
*style_context_codestyle: 186, label: 0*
from __future__ import annotations

import unittest

from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel


@require_tf
class TFBlenderbotModelTester:
    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
*code_codestyle: 48*
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
__UpperCAmelCase : List[Any] = [0] * no_of_processes
__UpperCAmelCase : List[str] = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(_UpperCamelCase ):
__UpperCAmelCase : Union[str, Any] = burst_time[i]
__UpperCAmelCase : Tuple = 0
__UpperCAmelCase : str = 0
__UpperCAmelCase : Dict = 9_9_9_9_9_9_9_9_9
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[str] = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(_UpperCamelCase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
__UpperCAmelCase : List[Any] = remaining_time[j]
__UpperCAmelCase : Tuple = j
__UpperCAmelCase : Any = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
__UpperCAmelCase : Dict = remaining_time[short]
if minm == 0:
__UpperCAmelCase : List[str] = 9_9_9_9_9_9_9_9_9
if remaining_time[short] == 0:
complete += 1
__UpperCAmelCase : Dict = False
# Find finish time of current process
__UpperCAmelCase : int = increment_time + 1
# Calculate waiting time
__UpperCAmelCase : List[Any] = finish_time - arrival_time[short]
__UpperCAmelCase : Tuple = finar - burst_time[short]
if waiting_time[short] < 0:
__UpperCAmelCase : List[Any] = 0
# Increment time
increment_time += 1
return waiting_time
def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : list[int] ) -> list[int]:
'''simple docstring'''
__UpperCAmelCase : List[str] = [0] * no_of_processes
for i in range(_UpperCamelCase ):
__UpperCAmelCase : List[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> None:
'''simple docstring'''
__UpperCAmelCase : List[Any] = 0
__UpperCAmelCase : Tuple = 0
for i in range(_UpperCamelCase ):
__UpperCAmelCase : Optional[Any] = total_waiting_time + waiting_time[i]
__UpperCAmelCase : Dict = total_turn_around_time + turn_around_time[i]
print(f'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
print("""Average turn around time =""" , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print('Enter how many process you want to analyze')
UpperCAmelCase : Optional[Any] = int(input())
UpperCAmelCase : Any = [0] * no_of_processes
UpperCAmelCase : Tuple = [0] * no_of_processes
UpperCAmelCase : Tuple = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print('Enter the arrival time and burst time for process:--' + str(i + 1))
UpperCAmelCase , UpperCAmelCase : Tuple = map(int, input().split())
UpperCAmelCase : List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
UpperCAmelCase : Dict = burst_time
UpperCAmelCase : Any = no_of_processes
UpperCAmelCase : int = waiting_time
UpperCAmelCase : Union[str, Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
UpperCAmelCase : List[Any] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
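# Worked example (added; traced by hand through calculate_waitingtime above):
# two processes arriving at t=0 with burst times [2, 1]. The shorter job runs first,
# so process 2 finishes at t=1 (waiting 0) and process 1 at t=3 (waiting 1):
#     >>> calculate_waitingtime([0, 0], [2, 1], 2)
#     [1, 0]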
*style_context_codestyle: 139, label: 0*
from .imports import is_rich_available


if is_rich_available():
    from rich.traceback import install

    install(show_locals=False)

else:
    raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
*code_codestyle: 702*
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Page Replacement Algorithm: Least Recently Used (LRU) caching."""

    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Creates an empty store and map for the keys; the cache is set to size n."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Look for a page in the cache store and add a reference to the set.
        Remove the least recently used key if the store is full.
        """
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print all the elements in the store."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
*style_context_codestyle: 695, label: 0*
import json
from typing import List, Optional, Tuple

from tokenizers import pre_tokenizers, processors

from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
    },
    "merges_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json",
        "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json",
        "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json",
        "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json",
        "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json",
        "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1_024,
    "facebook/bart-large": 1_024,
    "facebook/bart-large-mnli": 1_024,
    "facebook/bart-large-cnn": 1_024,
    "facebook/bart-large-xsum": 1_024,
    "yjernite/bart_eli5": 1_024,
}


class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = mask_token

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
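# Usage sketch (added; assumes the `transformers` package plus network access for the
# pretrained files):
#     >>> tok = BartTokenizerFast.from_pretrained("facebook/bart-base")
#     >>> ids = tok("Hello world")["input_ids"]
#     >>> ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id  # <s> ... </s>
#     True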
*code_codestyle: 349*
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
*style_context_codestyle: 349, label: 1*
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def _snake_case ( _snake_case : str = "https://www.worldometers.info/coronavirus" ):
lowerCAmelCase : str = BeautifulSoup(requests.get(_snake_case ).text , '''html.parser''' )
lowerCAmelCase : Any = soup.findAll('''h1''' )
lowerCAmelCase : Optional[Any] = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(_snake_case , _snake_case )}
if __name__ == "__main__":
print('''\033[1m''' + '''COVID-19 Status of the World''' + '''\033[0m\n''')
for key, value in world_covidaa_stats().items():
print(f"""{key}\n{value}\n""")
*code_codestyle: 717*
"""simple docstring"""
def _snake_case ( _snake_case : float , _snake_case : list[float] ):
if discount_rate < 0:
raise ValueError('''Discount rate cannot be negative''' )
if not cash_flows:
raise ValueError('''Cash flows list cannot be empty''' )
lowerCAmelCase : List[str] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(_snake_case ) )
return round(_snake_case , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
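# Worked example (added): an initial outlay of 100 followed by inflows of 50 and 60,
# discounted at 10%; the flow at index 0 is undiscounted, as in the code above.
#     >>> present_value(0.1, [-100, 50, 60])  # -100 + 50/1.1 + 60/1.21
#     -4.96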
*style_context_codestyle: 637, label: 0*
def is_power_of_two(number: int) -> bool:
    """Return True if this number is a power of two, using bit manipulation."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
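# Usage sketch (added): a power of two has a single set bit, so n & (n - 1) clears it.
#     >>> is_power_of_two(8)   # 0b1000 & 0b0111 == 0
#     True
#     >>> is_power_of_two(6)   # 0b0110 & 0b0101 == 0b0100
#     False
#     >>> is_power_of_two(0)   # edge case: 0 is reported as a power of two here
#     True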
*code_codestyle: 582*
import argparse
import glob
import importlib.util
import os
import re

import black
from doc_builder.style_doc import style_docstrings_in_code


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."


# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result


def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
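# Example of the comment format this script enforces (added; the object path and
# rename below are illustrative, not taken from the diffusers codebase):
#
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# `_re_copy_warning` extracts the object path and the optional `old->new` replace
# pattern, which is applied to the original code before the comparison.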
*style_context_codestyle: 582, label: 1*
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
*code_codestyle: 84*
import unittest

import numpy as np

from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}


@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']]  # noqa: E231
        expected_boxes = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]]  # noqa: E231
        # fmt: on

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
*style_context_codestyle: 84, label: 1*
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Stores two signals and computes their circular convolution using the
    matrix method.
    """

    def __init__(self):
        # Default example signals; the shorter one is zero-padded below.
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]
        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
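
    # Added demo (not in the original module): with the default signals
    # [2, 1, 2, -1] and [1, 2, 3, 4], the circular convolution works out to
    # [10, 10, 6, 14] -- each output element is the dot product of the first
    # signal with a rotation of the second.
    print(CircularConvolution().circular_convolution())  # -> [10, 10, 6, 14]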
| 518
|
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
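
    # Added sanity check (not in the original module): one mole of an ideal
    # gas at 300 K confined to 0.0224 m^3 -- P = nRT / V, roughly 1.11e5 Pa.
    print(pressure_of_gas_system(1, 300, 0.0224))  # ~111354.4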
| 518
| 1
|
def is_pangram(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Collect every distinct letter and check that all 26 are present."""
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower())
    return len(frequency) == 26


def is_pangram_faster(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Track seen letters in a 26-entry boolean flag array."""
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char) - 97] = True
        elif char.isupper():
            flag[ord(char) - 65] = True
    return all(flag)


def is_pangram_fastest(input_str: str = "The quick brown fox jumps over the lazy dog") -> bool:
    """Set comprehension over the lowercased alphabetic characters."""
    return len({char for char in input_str.lower() if char.isalpha()}) == 26


def benchmark() -> None:
    """Benchmark code comparing the different versions."""
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup))
    print(timeit("is_pangram_faster()", setup=setup))
    print(timeit("is_pangram_fastest()", setup=setup))
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
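
    # Added example: all three implementations agree on a non-pangram too.
    assert not is_pangram("hello world")
    assert not is_pangram_faster("hello world")
    assert not is_pangram_fastest("hello world")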
| 703
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    """Recursively print the structure of a (possibly nested) state dict."""
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    """Permute a fused QKV weight/bias so the Q/K/V splits become contiguous."""
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
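

# Worked shape example (editorial note, not in the original script): for a
# checkpoint_version >= 2.0 QKV weight with num_heads=16, hidden_size=64 per
# head and num_splits=3, `param` arrives as [16 * 3 * 64, :]; the view and
# transpose above reorder it to [3 * 16 * 64, :] so the Q/K/V splits become
# contiguous, and the final view restores the original 2-D shape.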
def convert_megatron_checkpoint(args, input_state_dict, config):
    """Convert a Megatron-LM GPT2 state dict to the transformers layout."""
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
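

# Editorial note with a key/shape example (not part of the original script):
# for a 24-layer model the loop above produces keys such as
# "transformer.h.0.ln_1.weight", "transformer.h.0.attn.c_attn.weight" (shape
# [n_embd, 3 * n_embd] after the final transpose) and
# "transformer.h.0.mlp.c_fc.weight".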
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 237
| 0
|
"""simple docstring"""
def infix_2_postfix(infix: str) -> str:
    """Convert an infix expression to postfix, printing each step in a table."""
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix: str) -> str:
    """Convert an infix expression to prefix via the reversed-postfix trick."""
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
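

# Worked example (added for illustration): infix_2_postfix("a+b^c") prints
# its table and returns "abc^+"; infix_2_prefix("a+b^c") therefore returns
# "+a^bc" (reverse the input, convert to postfix, reverse the result).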
if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 77
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple=3 , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : str=[10, 20, 30, 40] , UpperCamelCase_ : Tuple=[1, 1, 2, 1] , UpperCamelCase_ : str=True , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Dict="relu" , UpperCamelCase_ : str=3 , UpperCamelCase_ : int=None , ):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : List[str] = batch_size
__UpperCAmelCase : List[str] = image_size
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : Union[str, Any] = embeddings_size
__UpperCAmelCase : Dict = hidden_sizes
__UpperCAmelCase : Dict = depths
__UpperCAmelCase : Tuple = is_training
__UpperCAmelCase : List[Any] = use_labels
__UpperCAmelCase : Optional[int] = hidden_act
__UpperCAmelCase : str = num_labels
__UpperCAmelCase : Optional[int] = scope
__UpperCAmelCase : Dict = len(UpperCamelCase_)
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__UpperCAmelCase : Dict = self.get_config()
return config, pixel_values
def a_ ( self : Dict):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a_ ( self : Any , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : List[str] = FlaxRegNetModel(config=UpperCamelCase_)
__UpperCAmelCase : Dict = model(UpperCamelCase_)
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a_ ( self : Any , UpperCamelCase_ : Dict , UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = self.num_labels
__UpperCAmelCase : Tuple = FlaxRegNetForImageClassification(config=UpperCamelCase_)
__UpperCAmelCase : str = model(UpperCamelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase : Tuple = config_and_inputs
__UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class a__ ( __magic_name__ , unittest.TestCase ):
lowercase_ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
lowercase_ = False
lowercase_ = False
lowercase_ = False
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Tuple = FlaxRegNetModelTester(self)
__UpperCAmelCase : str = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_)
def a_ ( self : Dict):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a_ ( self : Tuple):
"""simple docstring"""
return
def a_ ( self : Optional[Any]):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_)
def a_ ( self : Union[str, Any]):
"""simple docstring"""
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_)
@unittest.skip(reason="RegNet does not use inputs_embeds")
def a_ ( self : Union[str, Any]):
"""simple docstring"""
pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def a_ ( self : Optional[int]):
"""simple docstring"""
pass
def a_ ( self : str):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : int = model_class(UpperCamelCase_)
__UpperCAmelCase : Optional[int] = inspect.signature(model.__call__)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Any = [*signature.parameters.keys()]
__UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase_)
def a_ ( self : int):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any]):
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase_)
__UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_))
__UpperCAmelCase : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__UpperCAmelCase : str = self.model_tester.num_stages
self.assertEqual(len(UpperCamelCase_) , expected_num_stages + 1)
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : List[str] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : Optional[int] = True
check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_)
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase , __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
__UpperCAmelCase : List[Any] = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_)
__UpperCAmelCase : Optional[int] = model_class(UpperCamelCase_)
@jax.jit
def model_jitted(UpperCamelCase_ : int , **UpperCamelCase_ : Optional[int]):
return model(pixel_values=UpperCamelCase_ , **UpperCamelCase_)
with self.subTest("JIT Enabled"):
__UpperCAmelCase : Optional[Any] = model_jitted(**UpperCamelCase_).to_tuple()
with self.subTest("JIT Disabled"):
with jax.disable_jit():
__UpperCAmelCase : Dict = model_jitted(**UpperCamelCase_).to_tuple()
self.assertEqual(len(UpperCamelCase_) , len(UpperCamelCase_))
for jitted_output, output in zip(UpperCamelCase_ , UpperCamelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class a__ ( unittest.TestCase ):
@cached_property
def a_ ( self : Optional[int]):
"""simple docstring"""
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None
@slow
def a_ ( self : int):
"""simple docstring"""
__UpperCAmelCase : Any = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
__UpperCAmelCase : Dict = self.default_image_processor
__UpperCAmelCase : str = prepare_img()
__UpperCAmelCase : int = image_processor(images=UpperCamelCase_ , return_tensors="np")
__UpperCAmelCase : Dict = model(**UpperCamelCase_)
# verify the logits
__UpperCAmelCase : Dict = (1, 1000)
self.assertEqual(outputs.logits.shape , UpperCamelCase_)
__UpperCAmelCase : Any = jnp.array([-0.4180, -1.5051, -3.4836])
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1e-4))
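

# Note on running these tests (editorial addition): the integration test above
# is decorated with @slow, so it is skipped unless the RUN_SLOW environment
# variable is set, e.g. `RUN_SLOW=1 pytest <path to this test file>`.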
| 77
| 1
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Optional[int] = DownBlockaD # noqa F405
snake_case : Tuple = """down"""
def _lowerCamelCase ( self ):
UpperCamelCase__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Dict = ResnetDownsampleBlockaD # noqa F405
snake_case : str = """down"""
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : int = AttnDownBlockaD # noqa F405
snake_case : int = """down"""
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Tuple = CrossAttnDownBlockaD # noqa F405
snake_case : Optional[int] = """down"""
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase__ = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Dict = SimpleCrossAttnDownBlockaD # noqa F405
snake_case : List[Any] = """down"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Dict = SkipDownBlockaD # noqa F405
snake_case : str = """down"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_skip_sample=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : List[str] = AttnSkipDownBlockaD # noqa F405
snake_case : Any = """down"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_skip_sample=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Union[str, Any] = DownEncoderBlockaD # noqa F405
snake_case : str = """down"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = {
"""in_channels""": 32,
"""out_channels""": 32,
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : str = AttnDownEncoderBlockaD # noqa F405
snake_case : Optional[int] = """down"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = {
"""in_channels""": 32,
"""out_channels""": 32,
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Any = UNetMidBlockaD # noqa F405
snake_case : Dict = """mid"""
def _lowerCamelCase ( self ):
UpperCamelCase__ = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Dict = UNetMidBlockaDCrossAttn # noqa F405
snake_case : List[str] = """mid"""
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase__ = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Dict = UNetMidBlockaDSimpleCrossAttn # noqa F405
snake_case : List[Any] = """mid"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase__ = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Optional[int] = UpBlockaD # noqa F405
snake_case : int = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Optional[Any] = ResnetUpsampleBlockaD # noqa F405
snake_case : List[Any] = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Union[str, Any] = CrossAttnUpBlockaD # noqa F405
snake_case : int = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase__ = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : str = SimpleCrossAttnUpBlockaD # noqa F405
snake_case : Optional[int] = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase , include_encoder_hidden_states=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ , UpperCamelCase__ = super().prepare_init_args_and_inputs_for_common()
UpperCamelCase__ = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Dict = AttnUpBlockaD # noqa F405
snake_case : Optional[Any] = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : List[str] = SkipUpBlockaD # noqa F405
snake_case : Optional[Any] = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : str = AttnSkipUpBlockaD # noqa F405
snake_case : List[Any] = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : str = UpDecoderBlockaD # noqa F405
snake_case : Tuple = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = {"""in_channels""": 32, """out_channels""": 32}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(__lowerCAmelCase )
class __SCREAMING_SNAKE_CASE ( _a , unittest.TestCase ):
snake_case : Optional[int] = AttnUpDecoderBlockaD # noqa F405
snake_case : List[Any] = """up"""
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=__lowerCAmelCase )
def _lowerCamelCase ( self ):
UpperCamelCase__ = {"""in_channels""": 32, """out_channels""": 32}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
UpperCamelCase__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(__lowerCAmelCase )
| 700
|
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
snake_case : Union[str, Any] = MODEL_FOR_CAUSAL_LM_MAPPING
snake_case : List[str] = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""pt""" )
# Using `do_sample=False` to force deterministic output
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
] , )
UpperCamelCase__ = text_generator(["""This is a test""", """This is a second test"""] )
self.assertEqual(
__lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."""
""" oscope. FiliFili@@"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"""
""" oscope. oscope. FiliFili@@"""
)
}
],
] , )
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase , num_return_sequences=2 , return_tensors=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
] , )
UpperCamelCase__ = text_generator.model.config.eos_token_id
UpperCamelCase__ = """<pad>"""
UpperCamelCase__ = text_generator(
["""This is a test""", """This is a second test"""] , do_sample=__lowerCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=__lowerCAmelCase , )
self.assertEqual(
__lowerCAmelCase , [
[
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
],
[
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
{"""generated_token_ids""": ANY(__lowerCAmelCase )},
],
] , )
@require_tf
def _lowerCamelCase ( self ):
UpperCamelCase__ = pipeline(task="""text-generation""" , model="""sshleifer/tiny-ctrl""" , framework="""tf""" )
# Using `do_sample=False` to force deterministic output
UpperCamelCase__ = text_generator("""This is a test""" , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
] , )
UpperCamelCase__ = text_generator(["""This is a test""", """This is a second test"""] , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[
{
"""generated_text""": (
"""This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"""
""" please,"""
)
}
],
[
{
"""generated_text""": (
"""This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"""
""" Cannes 閲閲Cannes Cannes Cannes 攵 please,"""
)
}
],
] , )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = TextGenerationPipeline(model=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
return text_generator, ["This is a test", "Another test"]
def _lowerCamelCase ( self ):
UpperCamelCase__ = """Hello I believe in"""
UpperCamelCase__ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
UpperCamelCase__ = text_generator(__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [{"""generated_text""": """Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"""}] , )
UpperCamelCase__ = text_generator(__lowerCAmelCase , stop_sequence=""" fe""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": """Hello I believe in fe"""}] )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase ):
UpperCamelCase__ = text_generator.model
UpperCamelCase__ = text_generator.tokenizer
UpperCamelCase__ = text_generator("""This is a test""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCamelCase__ = text_generator("""This is a test""" , return_full_text=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCamelCase__ = pipeline(task="""text-generation""" , model=__lowerCAmelCase , tokenizer=__lowerCAmelCase , return_full_text=__lowerCAmelCase )
UpperCamelCase__ = text_generator("""This is a test""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
self.assertNotIn("""This is a test""" , outputs[0]["""generated_text"""] )
UpperCamelCase__ = text_generator("""This is a test""" , return_full_text=__lowerCAmelCase )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]["""generated_text"""].startswith("""This is a test""" ) )
UpperCamelCase__ = text_generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCamelCase__ = text_generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=__lowerCAmelCase )
self.assertEqual(
__lowerCAmelCase , [
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
[{"""generated_text""": ANY(__lowerCAmelCase )}, {"""generated_text""": ANY(__lowerCAmelCase )}],
] , )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_full_text=__lowerCAmelCase , return_text=__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_full_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
with self.assertRaises(__lowerCAmelCase ):
UpperCamelCase__ = text_generator("""test""" , return_text=__lowerCAmelCase , return_tensors=__lowerCAmelCase )
# Empty prompt is slighly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCamelCase__ = text_generator("""""" )
self.assertEqual(__lowerCAmelCase , [{"""generated_text""": ANY(__lowerCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCamelCase__ = text_generator("""""" )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCamelCase__ = ["""RwkvForCausalLM""", """XGLMForCausalLM""", """GPTNeoXForCausalLM"""]
if (
tokenizer.model_max_length < 10000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator("""This is a test""" * 500 , max_new_tokens=20 )
UpperCamelCase__ = text_generator("""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(__lowerCAmelCase ):
text_generator(
"""This is a test""" * 500 , handle_long_generation="""hole""" , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self ):
import torch
# Classic `model_kwargs`
UpperCamelCase__ = pipeline(
model="""hf-internal-testing/tiny-random-bloom""" , model_kwargs={"""device_map""": """auto""", """torch_dtype""": torch.bfloataa} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCamelCase__ = pipe("""This is a test""" )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
UpperCamelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.bfloataa )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
UpperCamelCase__ = pipe("""This is a test""" )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
UpperCamelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" )
self.assertEqual(pipe.model.device , torch.device(0 ) )
self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
UpperCamelCase__ = pipe("""This is a test""" )
self.assertEqual(
__lowerCAmelCase , [
{
"""generated_text""": (
"""This is a test test test test test test test test test test test test test test test test"""
""" test"""
)
}
] , )
@require_torch
@require_torch_gpu
def _lowerCamelCase ( self ):
import torch
UpperCamelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device=0 , torch_dtype=torch.floataa )
pipe("""This is a test""" )
@require_torch
@require_accelerate
@require_torch_gpu
def _lowerCamelCase ( self ):
import torch
UpperCamelCase__ = pipeline(model="""hf-internal-testing/tiny-random-bloom""" , device_map="""auto""" , torch_dtype=torch.floataa )
pipe("""This is a test""" , do_sample=__lowerCAmelCase , top_p=0.5 )
def _lowerCamelCase ( self ):
UpperCamelCase__ = """Hello world"""
UpperCamelCase__ = pipeline("""text-generation""" , model="""hf-internal-testing/tiny-random-gpt2""" )
if text_generator.model.framework == "tf":
UpperCamelCase__ = logging.get_logger("""transformers.generation.tf_utils""" )
else:
UpperCamelCase__ = logging.get_logger("""transformers.generation.utils""" )
UpperCamelCase__ = """Both `max_new_tokens`""" # The beggining of the message to be checked in this test
# Both are set by the user -> log warning
with CaptureLogger(__lowerCAmelCase ) as cl:
UpperCamelCase__ = text_generator(__lowerCAmelCase , max_length=10 , max_new_tokens=1 )
self.assertIn(__lowerCAmelCase , cl.out )
# The user only sets one -> no warning
with CaptureLogger(__lowerCAmelCase ) as cl:
UpperCamelCase__ = text_generator(__lowerCAmelCase , max_new_tokens=1 )
self.assertNotIn(__lowerCAmelCase , cl.out )
with CaptureLogger(__lowerCAmelCase ) as cl:
UpperCamelCase__ = text_generator(__lowerCAmelCase , max_length=10 )
self.assertNotIn(__lowerCAmelCase , cl.out )
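

# Minimal standalone usage sketch (editorial addition; it mirrors what the
# tests above exercise, using the same tiny test checkpoint):
#
#   from transformers import pipeline
#
#   generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
#   print(generator("Hello I believe in", max_new_tokens=5)[0]["generated_text"])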
| 548
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser('Transformers CLI tool', usage='transformers-cli <command> [<args>]')
    commands_parser = parser.add_subparsers(help='transformers-cli command helpers')

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
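
# Example invocations (editorial note; each subcommand is registered in main()
# above, exact flags depend on the installed transformers version):
#   transformers-cli env
#   transformers-cli download bert-base-uncased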
| 459
|
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
_UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _a : CLIPSegForImageSegmentation , _a : CLIPSegProcessor , _a : AutoencoderKL , _a : CLIPTextModel , _a : CLIPTokenizer , _a : UNetaDConditionModel , _a : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _a : StableDiffusionSafetyChecker , _a : CLIPImageProcessor , ) -> int:
super().__init__()
if hasattr(scheduler.config , 'steps_offset' ) and scheduler.config.steps_offset != 1:
__lowerCamelCase : int = (
f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
'to update the config accordingly as leaving `steps_offset` might led to incorrect results'
' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'
' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'
' file'
)
deprecate('steps_offset!=1' , '1.0.0' , _a , standard_warn=_a )
__lowerCamelCase : int = dict(scheduler.config )
__lowerCamelCase : Any = 1
__lowerCamelCase : List[Any] = FrozenDict(_a )
if hasattr(scheduler.config , 'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False:
__lowerCamelCase : str = (
f'The configuration file of this scheduler: {scheduler} has not set the configuration'
' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'
' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'
' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'
' Hub, it would be very nice if you could open a Pull request for the'
' `scheduler/scheduler_config.json` file'
)
deprecate('skip_prk_steps not set' , '1.0.0' , _a , standard_warn=_a )
__lowerCamelCase : Dict = dict(scheduler.config )
__lowerCamelCase : Tuple = True
__lowerCamelCase : Union[str, Any] = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def _lowercase ( self : Optional[int] , _a : Optional[Union[str, int]] = "auto" ) -> List[str]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__lowerCamelCase : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def _lowercase ( self : Optional[Any] ) -> int:
self.enable_attention_slicing(_a )
def _lowercase ( self : str ) -> Any:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCamelCase : Dict = torch.device('cuda' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self : Dict ) -> int:
if self.device != torch.device('meta' ) or not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : List[str] , _a : Union[str, List[str]] , _a : Union[torch.FloatTensor, PIL.Image.Image] , _a : str , _a : int = 512 , _a : int = 512 , _a : int = 50 , _a : float = 7.5 , _a : Optional[Union[str, List[str]]] = None , _a : Optional[int] = 1 , _a : float = 0.0 , _a : Optional[torch.Generator] = None , _a : Optional[torch.FloatTensor] = None , _a : Optional[str] = "pil" , _a : bool = True , _a : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _a : int = 1 , **_a : Optional[Any] , ) -> int:
__lowerCamelCase : Union[str, Any] = self.segmentation_processor(
text=[text] , images=[image] , padding='max_length' , return_tensors='pt' ).to(self.device )
__lowerCamelCase : Union[str, Any] = self.segmentation_model(**_a )
__lowerCamelCase : List[str] = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__lowerCamelCase : Optional[Any] = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__lowerCamelCase : Tuple = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
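

# Usage sketch (editorial addition, untested; argument names follow the
# constructor and __call__ above, checkpoint ids are illustrative):
#
#   from diffusers import DiffusionPipeline
#   from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#       segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#   )
#   result = pipe(image=init_image, text="a glass cup", prompt="a cup of coffee")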
| 459
| 1
|
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """
    Undirected Unweighted Graph for running the Markov Chain algorithm.
    """

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str):
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float):
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self):
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start, transitions, steps):
    """Run the Markov chain from `start` for `steps` steps and count node visits."""
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
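
    # Added demo: a two-node chain that strongly prefers staying in "a";
    # with these probabilities "a" should be visited roughly 5x as often
    # as "b" over a long run.
    transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", transitions, 1000))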
| 131
|
from math import pi, sqrt
def gamma(num: float) -> float:
    """Calculate the gamma function value for a positive integer or half-integer."""
    if num <= 0:
        raise ValueError('''math domain error''')
    if num > 171.5:
        raise OverflowError('''math range error''')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('''num must be an integer or a half-integer''')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def a__ ( ):
"""simple docstring"""
assert gamma(0.5 ) == sqrt(snake_case )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
lowercase_ = 1.0
while num:
lowercase_ = float(input("""Gamma of: """))
print(f'''gamma({num}) = {gamma(num)}''')
print("""\nEnter 0 to exit...""")
| 131
| 1
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ :Union[str, Any] = pytest.mark.integration
@require_faiss
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def a_ ( self : List[str] ):
"""simple docstring"""
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
return dset
def a_ ( self : Dict ):
"""simple docstring"""
import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")
def a_ ( self : Optional[int] ):
"""simple docstring"""
import faiss
__lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def a_ ( self : str ):
"""simple docstring"""
import faiss
__lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
dset.save_faiss_index("""vecs""" , tmp_file.name )
dset.load_faiss_index("""vecs2""" , tmp_file.name )
os.unlink(tmp_file.name )
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
def a_ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name="""vecs""" )
dset.drop_index("""vecs""" )
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))
def a_ ( self : Optional[int] ):
"""simple docstring"""
from elasticsearch import Elasticsearch
__lowerCamelCase : Dataset = self._create_dummy_dataset()
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
self.assertEqual(examples["""filename"""][0] , """my_name-train_29""" )
@require_faiss
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def a_ ( self : int ):
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
# add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
# batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)
def a_ ( self : Optional[Any] ):
"""simple docstring"""
import faiss
        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            index = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))
def a_ ( self : List[Any] ):
"""simple docstring"""
import faiss
        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def a_ ( self : Any ):
"""simple docstring"""
import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs(mockfs):
"""simple docstring"""
import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def a_ ( self : List[str] ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch("""elasticsearch.Elasticsearch.search""" ) as mocked_search, patch(
"""elasticsearch.client.IndicesClient.create""" ) as mocked_index_create, patch("""elasticsearch.helpers.streaming_bulk""" ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])
            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)
            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
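# A minimal sketch of the same `datasets` APIs outside the test harness (column
# and variable names are illustrative):
#
#   ds = ds.add_faiss_index(column="vecs")
#   scores, examples = ds.get_nearest_examples("vecs", query_vector, k=10)
#   ds.save_faiss_index("vecs", "my_index.faiss")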
| 150
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
snake_case : str = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case : Optional[int] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h: int, w: int, scale_factor: int = 8):
    """Round (h, w) up to the nearest size compatible with the movq scale factor."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
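# For example, with the default scale_factor=8 (so 8**2 = 64), get_new_h_w(768, 768)
# returns (96, 96): 768 // 64 = 12 latent cells per side, times the scale factor 8.
# A non-multiple such as 700 rounds up: 700 // 64 = 10 remainder 60 -> 11 * 8 = 88.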
class _snake_case ( snake_case ):
def __init__( self , _a , _a , _a , _a , _a , ):
super().__init__()
self.register_modules(
text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , movq=_a , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        # Scale the initial noise to the variance the scheduler expects at the first timestep
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
__magic_name__ : List[str] = len(_a ) if isinstance(_a , _a ) else 1
# get prompt text embeddings
__magic_name__ : str = self.tokenizer(
_a , padding="max_length" , truncation=_a , max_length=77 , return_attention_mask=_a , add_special_tokens=_a , return_tensors="pt" , )
__magic_name__ : Optional[Any] = text_inputs.input_ids
__magic_name__ : Union[str, Any] = self.tokenizer(_a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_a , _a ):
__magic_name__ : Any = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__magic_name__ : Union[str, Any] = text_input_ids.to(_a )
__magic_name__ : str = text_inputs.attention_mask.to(_a )
__magic_name__ , __magic_name__ : Any = self.text_encoder(
input_ids=_a , attention_mask=_a )
__magic_name__ : List[Any] = prompt_embeds.repeat_interleave(_a , dim=0 )
__magic_name__ : Any = text_encoder_hidden_states.repeat_interleave(_a , dim=0 )
__magic_name__ : Optional[int] = text_mask.repeat_interleave(_a , dim=0 )
if do_classifier_free_guidance:
__magic_name__ : List[str]
if negative_prompt is None:
__magic_name__ : Optional[int] = [""] * batch_size
elif type(_a ) is not type(_a ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(_a )} !='''
f''' {type(_a )}.''' )
elif isinstance(_a , _a ):
__magic_name__ : Tuple = [negative_prompt]
elif batch_size != len(_a ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(_a )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
__magic_name__ : Union[str, Any] = negative_prompt
__magic_name__ : List[Any] = self.tokenizer(
_a , padding="max_length" , max_length=77 , truncation=_a , return_attention_mask=_a , add_special_tokens=_a , return_tensors="pt" , )
__magic_name__ : str = uncond_input.input_ids.to(_a )
__magic_name__ : Any = uncond_input.attention_mask.to(_a )
__magic_name__ , __magic_name__ : str = self.text_encoder(
input_ids=_a , attention_mask=_a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__magic_name__ : Tuple = negative_prompt_embeds.shape[1]
__magic_name__ : Union[str, Any] = negative_prompt_embeds.repeat(1 , _a )
__magic_name__ : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _a )
__magic_name__ : Tuple = uncond_text_encoder_hidden_states.shape[1]
__magic_name__ : str = uncond_text_encoder_hidden_states.repeat(1 , _a , 1 )
__magic_name__ : int = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _a , -1 )
__magic_name__ : Optional[int] = uncond_text_mask.repeat_interleave(_a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__ : List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
__magic_name__ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__magic_name__ : Optional[int] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def SCREAMING_SNAKE_CASE ( self , _a=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__magic_name__ : Tuple = torch.device(f'''cuda:{gpu_id}''' )
__magic_name__ : Union[str, Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
def SCREAMING_SNAKE_CASE ( self , _a=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
__magic_name__ : int = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__magic_name__ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__magic_name__ , __magic_name__ : Any = cpu_offload_with_hook(_a , _a , prev_module_hook=_a )
if self.safety_checker is not None:
__magic_name__ , __magic_name__ : Optional[int] = cpu_offload_with_hook(self.safety_checker , _a , prev_module_hook=_a )
# We'll offload the last model manually.
__magic_name__ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self , _a , _a , _a , _a = None , _a = 512 , _a = 512 , _a = 100 , _a = 4.0 , _a = 1 , _a = None , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , _a ):
__magic_name__ : Dict = 1
elif isinstance(_a , _a ):
__magic_name__ : Union[str, Any] = len(_a )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_a )}''' )
__magic_name__ : Dict = self._execution_device
__magic_name__ : List[str] = batch_size * num_images_per_prompt
__magic_name__ : Tuple = guidance_scale > 1.0
__magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = self._encode_prompt(
_a , _a , _a , _a , _a )
if isinstance(_a , _a ):
__magic_name__ : Any = torch.cat(_a , dim=0 )
if isinstance(_a , _a ):
__magic_name__ : str = torch.cat(_a , dim=0 )
if do_classifier_free_guidance:
__magic_name__ : str = image_embeds.repeat_interleave(_a , dim=0 )
__magic_name__ : Tuple = negative_image_embeds.repeat_interleave(_a , dim=0 )
__magic_name__ : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_a )
self.scheduler.set_timesteps(_a , device=_a )
__magic_name__ : str = self.scheduler.timesteps
__magic_name__ : Union[str, Any] = self.unet.config.in_channels
__magic_name__ , __magic_name__ : str = get_new_h_w(_a , _a , self.movq_scale_factor )
# create initial latent
__magic_name__ : Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _a , _a , _a , self.scheduler , )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
__magic_name__ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__magic_name__ : Any = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
__magic_name__ : Optional[Any] = self.unet(
sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0]
if do_classifier_free_guidance:
__magic_name__ , __magic_name__ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__magic_name__ , __magic_name__ : Optional[int] = noise_pred.chunk(2 )
__magic_name__ , __magic_name__ : int = variance_pred.chunk(2 )
__magic_name__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__magic_name__ : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__magic_name__ , __magic_name__ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : str = self.scheduler.step(
_a , _a , _a , generator=_a , ).prev_sample
# post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
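# Classifier-free guidance recap (explanatory note, not in the original): with
# do_classifier_free_guidance enabled, the batch is doubled (unconditional +
# text-conditioned halves), and the combined prediction used for the scheduler
# step is uncond + guidance_scale * (text - uncond), as computed in the loop above.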
| 124
| 0
|
'''simple docstring'''
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes: dict[str, RadixNode] = {}
        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf
        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Return (common substring, remaining node prefix, remaining word)."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1
        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True
        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)
        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)
            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix
                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node
                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(
                word
            )
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the nodes if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes
                return True

    def print_tree(self, height: int = 0) -> None:
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")
        for value in self.nodes.values():
            value.print_tree(height + 1)
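# A quick illustration of the structure (output shape is illustrative): after
# inserting "test" and "toaster", the shared prefix "t" becomes an internal node
# with two leaf children, "est" and "oaster":
#
#   - t
#   -- est  (leaf)
#   -- oaster  (leaf)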
def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)
    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)
    print("Words:", words)
    print("Tree:")
    root.print_tree()
if __name__ == "__main__":
main()
| 715
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
def __init__(self : Dict , A__ : str , A__ : int=3 , A__ : Dict=3_2 , A__ : str=3 , A__ : str=1_0 , A__ : Optional[int]=[1_0, 2_0, 3_0, 4_0] , A__ : Tuple=[1, 1, 2, 1] , A__ : int=True , A__ : List[Any]=True , A__ : List[Any]="relu" , A__ : Any=3 , A__ : Any=None , ) -> Tuple:
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = embeddings_size
lowercase = hidden_sizes
lowercase = depths
lowercase = is_training
lowercase = use_labels
lowercase = hidden_act
lowercase = num_labels
lowercase = scope
lowercase = len(A__ )
def UpperCAmelCase__ (self : str ) -> List[str]:
lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] , self.num_labels )
lowercase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ (self : Optional[Any] ) -> List[str]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase__ (self : Dict , A__ : Union[str, Any] , A__ : Optional[int] , A__ : Dict ) -> str:
lowercase = RegNetModel(config=A__ )
model.to(A__ )
model.eval()
lowercase = model(A__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def UpperCAmelCase__ (self : List[str] , A__ : List[str] , A__ : Union[str, Any] , A__ : str ) -> Dict:
lowercase = self.num_labels
lowercase = RegNetForImageClassification(A__ )
model.to(A__ )
model.eval()
lowercase = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ (self : Any ) -> Union[str, Any]:
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
UpperCAmelCase : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
UpperCAmelCase : Dict = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase : Dict = False
UpperCAmelCase : int = False
UpperCAmelCase : Tuple = False
UpperCAmelCase : Tuple = False
def UpperCAmelCase__ (self : Optional[int] ) -> Tuple:
lowercase = RegNetModelTester(self )
lowercase = ConfigTester(self , config_class=A__ , has_text_modality=A__ )
def UpperCAmelCase__ (self : List[Any] ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ (self : Optional[Any] ) -> int:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def UpperCAmelCase__ (self : int ) -> Optional[int]:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def UpperCAmelCase__ (self : List[Any] ) -> Optional[Any]:
pass
def UpperCAmelCase__ (self : Any ) -> str:
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(A__ )
lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A__ )
def UpperCAmelCase__ (self : List[Any] ) -> Union[str, Any]:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCAmelCase__ (self : List[str] ) -> Tuple:
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(config=A__ )
for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
def UpperCAmelCase__ (self : Optional[Any] ) -> List[str]:
def check_hidden_states_output(A__ : Optional[Any] , A__ : List[str] , A__ : Tuple ):
lowercase = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(A__ , A__ ) )
lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase = self.model_tester.num_stages
self.assertEqual(len(A__ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase = layer_type
lowercase = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(A__ , A__ , A__ )
def UpperCAmelCase__ (self : List[Any] ) -> Tuple:
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def UpperCAmelCase__ (self : Tuple ) -> Any:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = RegNetModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def UpperCAmelCase_ ( ):
"""simple docstring"""
lowercase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ (self : Optional[int] ) -> Union[str, Any]:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ (self : List[str] ) -> int:
lowercase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A__ )
lowercase = self.default_image_processor
lowercase = prepare_img()
lowercase = image_processor(images=A__ , return_tensors="pt" ).to(A__ )
# forward pass
with torch.no_grad():
lowercase = model(**A__ )
# verify the logits
lowercase = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , A__ )
lowercase = torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1e-4 ) )
| 459
| 0
|
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
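# Example: is_sum_subset([3, 34, 4, 12, 5, 2], 9) is True (4 + 5 = 9), while
# is_sum_subset([3, 34, 4, 12, 5, 2], 30) is False. The table costs O(n * required_sum)
# time and space, where n = len(arr).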
if __name__ == "__main__":
import doctest
doctest.testmod()
| 272
|
import numpy as np


def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """
    Power Iteration: find the largest-magnitude eigenvalue of `input_matrix`
    and its corresponding eigenvector, starting from an initial guess `vector`.
    """
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_)
    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector,
        # as they are only unique up to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
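# Convergence note (explanatory, not in the original): power iteration converges
# geometrically with ratio |lambda_2 / lambda_1|, so it is fast when the dominant
# eigenvalue is well separated and slow when the top two are close in magnitude.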
| 193
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase__ :
def __init__( self , UpperCamelCase , UpperCamelCase=3 , UpperCamelCase=32 , UpperCamelCase=3 , UpperCamelCase=10 , UpperCamelCase=[10, 20, 30, 40] , UpperCamelCase=[1, 1, 2, 1] , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase="relu" , UpperCamelCase=3 , UpperCamelCase=None , ) -> Optional[Any]:
__lowerCAmelCase = parent
__lowerCAmelCase = batch_size
__lowerCAmelCase = image_size
__lowerCAmelCase = num_channels
__lowerCAmelCase = embeddings_size
__lowerCAmelCase = hidden_sizes
__lowerCAmelCase = depths
__lowerCAmelCase = is_training
__lowerCAmelCase = use_labels
__lowerCAmelCase = hidden_act
__lowerCAmelCase = num_labels
__lowerCAmelCase = scope
__lowerCAmelCase = len(lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase = None
if self.use_labels:
__lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
__lowerCAmelCase = RegNetModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase ) -> int:
__lowerCAmelCase = self.num_labels
__lowerCAmelCase = RegNetForImageClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
__lowerCAmelCase = model(lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = config_and_inputs
__lowerCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
a : str = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
a : Dict = (
{'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification}
if is_torch_available()
else {}
)
a : str = False
a : Optional[Any] = False
a : Any = False
a : Tuple = False
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = RegNetModelTester(self )
__lowerCAmelCase = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ) -> Dict:
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def UpperCAmelCase_ ( self ) -> Optional[int]:
pass
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(lowerCAmelCase_ )
__lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase = [*signature.parameters.keys()]
__lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Optional[int]:
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase = model_class(config=lowerCAmelCase_ )
for name, module in model.named_modules():
            if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def UpperCAmelCase_ ( self ) -> List[Any]:
def check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase ):
__lowerCAmelCase = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
__lowerCAmelCase = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
__lowerCAmelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase = self.model_tester.num_stages
self.assertEqual(len(lowerCAmelCase_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
__lowerCAmelCase , __lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowerCAmelCase = layer_type
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCAmelCase_ ( self ) -> Tuple:
__lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase_ )
@slow
def UpperCAmelCase_ ( self ) -> Any:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase = RegNetModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCAmelCase ( ):
'''simple docstring'''
__lowerCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase_ ( self ) -> Dict:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
__lowerCAmelCase = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCAmelCase_ )
__lowerCAmelCase = self.default_image_processor
__lowerCAmelCase = prepare_img()
__lowerCAmelCase = image_processor(images=lowerCAmelCase_ , return_tensors="pt" ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
__lowerCAmelCase = model(**lowerCAmelCase_ )
# verify the logits
__lowerCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
__lowerCAmelCase = torch.tensor([-0.41_80, -1.50_51, -3.48_36] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) )
| 716
|
'''simple docstring'''
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
blip_test_file = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_test_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {"BertModelTest": "BertModelTester"}
__lowerCAmelCase = {
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_test_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
def UpperCAmelCase_ ( self ) -> str:
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = get_model_to_tester_mapping(UpperCamelCase )
__lowerCAmelCase = {
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
__lowerCAmelCase = {
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
| 39
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
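# A minimal sketch of the lazy-module pattern used above (illustrative only; the
# real implementation is transformers.utils._LazyModule): attribute access resolves
# each symbol to its submodule on first use, so importing the package stays cheap.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        # Look up which submodule exports the requested attribute, import it lazily.
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")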
| 390
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ : str = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
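# Illustrative usage (the class names are the real transformers ones; the values
# shown are just the defaults above):
#   from transformers import ViTMSNConfig, ViTMSNModel
#   config = ViTMSNConfig(image_size=224, patch_size=16)
#   model = ViTMSNModel(config)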
| 390
| 1
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return the primes below `max_number` via a sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count hybrid-integers p**q * q**p <= base**degree (Project Euler 800)."""
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 678
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
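# Note on the pattern above: when torch or a recent-enough transformers is missing,
# the dummy objects imported in the except-branch raise an informative ImportError
# only when a pipeline is actually instantiated, so importing the package stays safe.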
| 678
| 1
|
from collections import deque
from math import floor
from random import random
from time import time
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> List[str]:
_a : Any = {}
def __lowercase ( self , _a , _a , _a=1 ) -> Tuple:
if self.graph.get(_a ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
_a : Union[str, Any] = [[w, v]]
if not self.graph.get(_a ):
_a : Dict = []
def __lowercase ( self ) -> Union[str, Any]:
return list(self.graph )
def __lowercase ( self , _a , _a ) -> Optional[Any]:
if self.graph.get(_a ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_a )
def __lowercase ( self , _a=-2 , _a=-1 ) -> Dict:
if s == d:
return []
_a : Optional[int] = []
_a : int = []
if s == -2:
_a : Optional[int] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : Union[str, Any] = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a : Union[str, Any] = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_a )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
_a : List[str] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_a ) != 0:
_a : Any = stack[len(_a ) - 1]
else:
_a : Dict = ss
            # check if we have reached the starting point
if len(_a ) == 0:
return visited
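    # Note: `ss` acts as a sentinel for the node currently being expanded; when
    # `s == ss`, every child has been visited, so the stack is popped and the
    # search backtracks without recursion.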
def __lowercase ( self , _a=-1 ) -> List[str]:
if c == -1:
_a : List[str] = floor(random() * 1_0_0_0_0 ) + 1_0
for i in range(_a ):
# every vertex has max 100 edges
for _ in range(floor(random() * 1_0_2 ) + 1 ):
_a : Optional[Any] = floor(random() * c ) + 1
if n != i:
self.add_pair(_a , _a , 1 )
def __lowercase ( self , _a=-2 ) -> List[Any]:
_a : Any = deque()
_a : List[str] = []
if s == -2:
_a : Any = list(self.graph )[0]
d.append(_a )
visited.append(_a )
while d:
_a : Optional[Any] = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def __lowercase ( self , _a ) -> Optional[Any]:
_a : Optional[int] = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def __lowercase ( self , _a ) -> int:
return len(self.graph[u] )
def __lowercase ( self , _a=-2 ) -> int:
_a : str = []
_a : str = []
if s == -2:
_a : Any = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : Dict = s
_a : Union[str, Any] = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a : int = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_a ) != 0:
_a : List[Any] = stack[len(_a ) - 1]
else:
_a : Dict = ss
# check if se have reached the starting point
if len(_a ) == 0:
return sorted_nodes
def __lowercase ( self ) -> Any:
_a : List[str] = []
_a : Optional[Any] = []
_a : Optional[Any] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : Any = -2
_a : Optional[int] = []
_a : str = s
_a : List[Any] = False
_a : Optional[int] = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a : Optional[Any] = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a : int = len(_a ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a : Optional[int] = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a : Union[str, Any] = True
if len(_a ) != 0:
_a : Any = stack[len(_a ) - 1]
else:
_a : Optional[int] = False
indirect_parents.append(_a )
_a : Dict = s
_a : Union[str, Any] = ss
# check if se have reached the starting point
if len(_a ) == 0:
return list(_a )
def __lowercase ( self ) -> Any:
_a : Any = []
_a : Union[str, Any] = []
_a : List[str] = list(self.graph )[0]
stack.append(_a )
visited.append(_a )
_a : List[str] = -2
_a : Dict = []
_a : str = s
_a : Optional[Any] = False
_a : str = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
_a : str = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
_a : int = len(_a ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
_a : Any = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
_a : Optional[Any] = True
if len(_a ) != 0:
_a : Tuple = stack[len(_a ) - 1]
else:
_a : Union[str, Any] = False
indirect_parents.append(_a )
_a : Union[str, Any] = s
_a : List[Any] = ss
# check if se have reached the starting point
if len(_a ) == 0:
return False
def __lowercase ( self , _a=-2 , _a=-1 ) -> List[str]:
_a : Dict = time()
self.dfs(_a , _a )
_a : Tuple = time()
return end - begin
def __lowercase ( self , _a=-2 ) -> Optional[int]:
_a : int = time()
self.bfs(_a )
_a : List[Any] = time()
return end - begin
class Graph:
    def __init__(self):
        self.graph = {}
    # adding vertices and edges; adding the weight is optional and repetition is handled
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)
    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited
    # c is the count of nodes you want; if you leave it or pass -1 the count
    # will be random from 10 to 10010
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)
    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited
    def degree(self, u):
        return len(self.graph[u])
    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)
    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False
    def all_nodes(self):
        return list(self.graph)
    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin
    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
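# --- Illustrative usage sketch (not part of the original module): a minimal
# demo of the API above, assuming small integer / string node labels.
if __name__ == "__main__":
    dg = DirectedGraph()
    dg.add_pair(0, 1)
    dg.add_pair(1, 2)
    dg.add_pair(2, 0)  # introduces the cycle 0 -> 1 -> 2 -> 0
    print(dg.all_nodes())   # [0, 1, 2]
    print(dg.dfs(0))        # depth-first order starting from node 0
    print(dg.bfs(0))        # breadth-first order starting from node 0
    print(dg.in_degree(0))  # 1 (only the edge from node 2 points at 0)
    print(dg.has_cycle())   # True
    ug = Graph()
    ug.add_pair("a", "b", w=5)  # undirected: stored in both directions
    print(ug.degree("a"))       # 1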
| 14
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)
    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )
    @property
    def num_blocks(self):
        return len(self.block_sizes)
    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
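# --- Illustrative usage sketch (not part of the original file). Builds a small
# config and shows that `num_hidden_layers` is derived from `block_sizes`:
# config = FunnelConfig(block_sizes=[2, 2], d_model=128, n_head=4)
# config.num_hidden_layers  # -> 4 (sum of block_sizes)
# config.num_blocks         # -> 2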
| 14
| 1
|
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False
    def peak_monitor(self):
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break
    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()
    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures
def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem (reported in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem (reported in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures
def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
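# --- Illustrative usage sketch (not part of the original file). Wraps a chunk
# of work with the measurement helpers above; `heavy_work` is a stand-in name:
# start_measures = start_measure()
# heavy_work()  # e.g. a model forward pass
# measures = end_measure(start_measures)
# log_measures(measures, "heavy_work")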
| 714
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
},
"""merges_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
"""facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
"""facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
"""facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
"""yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class BartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BartTokenizer
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)
    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
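# --- Illustrative usage sketch (not part of the original file); requires the
# pretrained files to be downloadable from the Hub:
# tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")
# enc = tokenizer("Hello world", return_tensors="pt")
# enc["input_ids"]  # ids for "<s> Hello world </s>"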
| 197
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size
        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]
        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )
        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None
        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)
        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )
        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel
        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )
    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))
        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples
        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)
        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)
        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)
        if not return_dict:
            return (sample,)
        return UNet1DOutput(sample=sample)
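# --- Illustrative usage sketch (not part of the original file). Instantiates
# the model with its defaults and runs one denoising step on random data; the
# sample length 64 is an assumption chosen to be divisible by the downsampling:
# model = UNet1DModel(sample_size=64)
# sample = torch.randn(1, 2, 64)
# out = model(sample, timestep=10).sample  # same shape as the input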
| 94
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ : Any = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : int = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
lowercase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 64
| 0
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Saves the best model by validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=3,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)
    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)
    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})
    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")
    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
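# --- Illustrative usage sketch (not part of the original file). Wires the
# callbacks into a Lightning trainer; `module` is a stand-in LightningModule:
# trainer = pl.Trainer(
#     callbacks=[
#         Seq2SeqLoggingCallback(),
#         get_checkpoint_callback("outputs", metric="rouge2"),
#         get_early_stopping_callback(metric="rouge2", patience=3),
#     ],
# )
# trainer.fit(module)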
| 713
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"
    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch
        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
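# --- Illustrative usage sketch (not part of the original file). A config with
# per-layer attention windows, plus its ONNX export helper:
# config = LongformerConfig(attention_window=[256] * 12, max_position_embeddings=4096)
# onnx_config = LongformerOnnxConfig(config)
# onnx_config.default_onnx_opset  # -> at least 14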
| 130
| 0
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]
                self.assertEqual(len(outputs), num_out)
                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )
        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )
        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)
            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 629
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCAmelCase__ :
A_ : str = field(
default=_lowerCamelCase , metadata={'help': 'Model type selected in the list: ' + ', '.join(_lowerCamelCase )} )
A_ : str = field(
default=_lowerCamelCase , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
A_ : int = field(
default=1_2_8 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
A_ : int = field(
default=1_2_8 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
A_ : int = field(
default=6_4 , metadata={
'help': (
'The maximum number of tokens for the question. Questions longer than this will '
'be truncated to this length.'
)
} , )
A_ : int = field(
default=3_0 , metadata={
'help': (
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
)
} , )
A_ : bool = field(
default=_lowerCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
A_ : bool = field(
default=_lowerCamelCase , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
A_ : float = field(
default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
A_ : int = field(
default=2_0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
A_ : int = field(
default=0 , metadata={
'help': (
'language id of input for language-specific xlm models (see'
' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
)
} , )
A_ : int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : str = 'train'
A_ : str = 'dev'
class lowerCAmelCase__ ( _lowerCamelCase ):
A_ : SquadDataTrainingArguments
A_ : List[SquadFeatures]
A_ : Split
A_ : bool
def __init__( self : Optional[int] , __UpperCamelCase : SquadDataTrainingArguments , __UpperCamelCase : PreTrainedTokenizer , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Union[str, Split] = Split.train , __UpperCamelCase : Optional[bool] = False , __UpperCamelCase : Optional[str] = None , __UpperCamelCase : Optional[str] = "pt" , ) -> Any:
A = args
A = is_language_sensitive
A = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
if isinstance(__UpperCamelCase , __UpperCamelCase ):
try:
A = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
A = mode
# Load data features from cache or dataset file
A = 'v2' if args.version_2_with_negative else 'v1'
A = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A = cached_features_file + '.lock'
with FileLock(__UpperCamelCase ):
if os.path.exists(__UpperCamelCase ) and not args.overwrite_cache:
A = time.time()
A = torch.load(__UpperCamelCase )
# Legacy cache files have only features, while new cache files
# will have dataset and examples also.
A = self.old_features['features']
A = self.old_features.get('dataset' , __UpperCamelCase )
A = self.old_features.get('examples' , __UpperCamelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
if self.dataset is None or self.examples is None:
logger.warning(
f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
' future run' )
else:
if mode == Split.dev:
A = self.processor.get_dev_examples(args.data_dir )
else:
A = self.processor.get_train_examples(args.data_dir )
A , A = squad_convert_examples_to_features(
examples=self.examples , tokenizer=__UpperCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=__UpperCamelCase , )
A = time.time()
torch.save(
{'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , __UpperCamelCase , )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
    def __len__(self) -> int:
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask})
        if self.args.version_2_with_negative:
            inputs.update({'is_impossible': is_impossible})
        if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions})
        return inputs
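# Illustrative usage (an addition, not part of the original file): because
# __getitem__ returns a dict of tensors, the dataset plugs straight into a torch
# DataLoader. `args`, `tokenizer`, and `model` are assumed to exist already, and
# the batch size is an arbitrary example value.
def _example_training_step(args, tokenizer, model):
    from torch.utils.data import DataLoader

    dataset = SquadDataset(args, tokenizer, mode=Split.train)
    loader = DataLoader(dataset, batch_size=8, shuffle=True)
    for batch in loader:
        outputs = model(**batch)  # start/end positions are included in train mode
        outputs.loss.backward()
        break  # one step, for illustration only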
| 106
| 0
|
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        'Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep=' | ', )
    print('-' * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format

    return ''.join(post_fix)  # return Postfix as str
def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation
    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("
    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 702
|
from __future__ import annotations
from collections.abc import Callable
def trapezoidal_area(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100,) -> float:
    x1 = x_start
    fx1 = fnc(x1)
    area = 0.0
    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        area += abs(fx2 + fx1) * (x2 - x1) / 2
        # Increment step
        x1 = x2
        fx1 = fx2
    return area
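# Added cross-check (not part of the original file): because the loop above takes
# abs() of each trapezoid, trapezoidal_area approximates the unsigned area between
# the curve and the x axis. A signed-integral variant is a one-line change:
def trapezoidal_integral(fnc, x_start, x_end, steps=100):
    x1 = x_start
    fx1 = fnc(x1)
    total = 0.0
    for _ in range(steps):
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        total += (fx2 + fx1) * (x2 - x1) / 2  # no abs(): signed areas may cancel
        x1, fx1 = x2, fx2
    return total
# For f(x) = x^3 + x^2 on [-5, 5] the signed integral is exactly 250/3 ~ 83.33
# (the odd x^3 term cancels), which this variant converges to as steps grows.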
if __name__ == "__main__":

    def f(x):
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
| 462
| 0
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename the keys of the flax checkpoint so they match the PyTorch layout."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('weight',)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    """Split a flattened checkpoint key into its real layer name and tensorstore spec."""
    if "metadata" in layer:
        split_layer = layer.split('metadata')
        curr_real_layer_name = ''.join(split_layer[0])[:-1]
        split_layer = [tuple(('metadata' + split_layer[1]).split('/'))]
    elif "kvstore" in layer:
        split_layer = layer.split('kvstore')
        curr_real_layer_name = ''.join(split_layer[0])[:-1]
        split_layer = [tuple(('kvstore' + split_layer[1]).split('/'))]
    else:
        split_layer = layer.split('/')
        curr_real_layer_name = '/'.join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = 'file'
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('/', '.')] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Read the T5X checkpoint layer by layer and write PyTorch shards on the fly."""
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '/checkpoint', 'rb') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['optimizer']['target']
        checkpoint_info = flatten_dict(checkpoint_info, sep='/')
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split('/')), raw_weights)
        key = '/'.join(key)
        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('.bin', f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('.bin', f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '.bin', f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace('.bin', f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {'total_size': total_size}
    index = {'metadata': metadata, 'weight_map': weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), 'w', encoding='utf-8') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '\n'
        f.write(content)
    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
shard_on_the_fly(
    args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    """Round-trip a prompt through the converted checkpoint to verify it loads."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('google/switch-base-8')
    config.save_pretrained('/home/arthur_huggingface_co/transformers/switch_converted')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '/home/arthur_huggingface_co/transformers/switch_converted', device_map='auto')
    tokenizer = T5Tokenizer.from_pretrained('t5-small')
    text = 'A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'
    input_ids = tokenizer(text, return_tensors='pt').input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
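# Shape of the emitted index file (explanatory addition; the weight names below are
# illustrative examples, not taken from a real checkpoint). shard_on_the_fly writes
# a WEIGHTS_INDEX_NAME JSON of the form
#
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#       "encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
#       "decoder.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin"
#     }
#   }
#
# which is the same sharded-checkpoint convention that `from_pretrained` consumes.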
| 32
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex holding its key, parent (pi), neighbors, and edge weights."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph, root):
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph, root):
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
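# Example usage (an addition, not part of the original module). Builds the small
# weighted graph 1-2 (w=1), 2-3 (w=2), 1-3 (w=4) and extracts its minimum spanning
# tree with both variants; the expected edge list is an assumption verified by hand.
def _demo_prim():
    graph = [Vertex(i) for i in range(3)]  # ids "0", "1", "2"; connect() is 1-based
    connect(graph, 1, 2, 1)
    connect(graph, 2, 3, 2)
    connect(graph, 1, 3, 4)
    print(prim(graph, graph[0]))             # -> [(2, 1), (3, 2)]
    print(list(prim_heap(graph, graph[0])))  # same MST edges, heap-based variant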
def test_vector() -> None:
    """Doctest placeholder for the two Prim implementations."""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 636
| 0
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox_japanese'] = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
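# How the pattern above behaves (explanatory addition): `_import_structure` maps
# submodule names to exported symbols, and the `_LazyModule` placed in sys.modules
# defers the heavy torch import until an attribute is first touched, e.g.
#
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig  # cheap
#   cls = transformers.GPTNeoXJapaneseForCausalLM  # triggers the real import
#
# The TYPE_CHECKING branch gives static analyzers eager imports instead.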
| 705
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = '_'.join(args.dataset.split('/') + [args.config, args.split])
    # load metric
    wer = load_metric('wer')
    cer = load_metric('cer')
    # compute metrics
    wer_result = wer.compute(references=result['target'], predictions=result['prediction'])
    cer_result = cer.compute(references=result['target'], predictions=result['prediction'])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", 'w') as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, 'w') as p, open(target_file, 'w') as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + '\n')
                p.write(batch['prediction'] + '\n')
                t.write(f"{i}" + '\n')
                t.write(batch['target'] + '\n')

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, '', text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ['\n\n', '\n', '   ', '  ']
    for t in token_sequences_to_ignore:
        text = ' '.join(text.split(t))
    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column('audio', Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline('automatic-speech-recognition', model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch['audio']['array'], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s)
        batch['prediction'] = prediction['text']
        batch['target'] = normalize_text(batch['sentence'])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args)
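# Example of the normalization above (an addition, not in the original script):
#   normalize_text("Hello, WORLD!\n\nBye.")  ->  "hello world bye"
# Punctuation in the ignore set is stripped, text is lowercased, and the blank-line
# separator collapses to a single space.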
| 326
| 0
|
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 42
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_unispeech'] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 623
| 0
|
"""simple docstring"""
import numpy as np
def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))
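# Example usage (an addition): the function is vectorized, so it maps element-wise:
#   sigmoid(np.array([-1.0, 0.0, 1.0]))  ->  array([0.26894142, 0.5, 0.73105858])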
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720
|
"""simple docstring"""
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
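# The check above is O(n). A sketch of an O(sqrt(n)) variant (an addition, not part
# of the original file): pair each divisor d <= sqrt(n) with its cofactor n // d.
def perfect_fast(number: int) -> bool:
    if number <= 1:
        return False
    total = 1  # 1 divides everything; the number itself is excluded
    d = 2
    while d * d <= number:
        if number % d == 0:
            total += d
            if d != number // d:
                total += number // d
        d += 1
    return total == number
# perfect_fast(6) and perfect_fast(28) are True, matching perfect() above.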
if __name__ == "__main__":
print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
| 254
| 0
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ('bert-base-cased', {}),
        ('gpt2', {'use_cache': False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'tf', 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, 'pt', 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words']
        with NamedTemporaryFile(mode='w+t') as vocab_file:
            vocab_file.write('\n'.join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)
        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, 'pt', 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'tf', 12, **model_kwargs)
            quantized_path = quantize(Path(path))
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, 'pt', 12, **model_kwargs)
            quantized_path = quantize(path)
            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail('Quantized model is bigger than initial ONNX model')

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath('model.onnx')
            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()
            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)
            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'pt')

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random'))
        tokenizer = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random')
        self._test_infer_dynamic_axis(model, tokenizer, 'tf')

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1']
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: 'batch', 1: 'sequence'})
        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes['output_0'], {0: 'batch', 1: 'sequence'})
        self.assertDictEqual(shapes['output_1'], {0: 'batch'})

    def test_ensure_valid_input(self):
        valid_input_names = ['input_ids', 'attention_mask', 'token_type_ids']
        tokens = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, valid_input_names)
        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)
        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(valid_input_names))
        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']))
        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, valid_input_names)
        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)
        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens['input_ids'])
        self.assertEqual(ordered_input_names[0], 'input_ids')

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path('/home/something/my_fake_model.onnx'), '-test')
        self.assertEqual('/home/something/my_fake_model-test.onnx', generated.as_posix())
| 42
|
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings", set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr("datasets.inspect.huggingface_hub", HfhMock())


@pytest.mark.parametrize(
    "func, args", [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != "tmp_path" else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match="https://huggingface.co/docs/evaluate"):
        func(*args)
| 666
| 0
|
def solution():
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
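# Added variant (not in the original file): only the last ten digits are needed,
# so modular exponentiation avoids building the full big integers:
def solution_mod():
    mod = 10**10
    total = sum(pow(i, i, mod) for i in range(1, 1001)) % mod
    return str(total).zfill(10)  # zfill keeps any leading zeros of the 10-digit tail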
if __name__ == "__main__":
print(solution())
| 721
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25
| 0
|
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
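# Added note: the passes alternate between even- and odd-indexed pairs (via the
# `_ % 2` start offset); n such passes guarantee a sorted list, e.g.
#   odd_even_transposition([5, 4, 3, 2, 1])  ->  [1, 2, 3, 4, 5]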
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
| 238
|
'''simple docstring'''
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    """Download manager that serves pre-zipped dummy data instead of real downloads."""

    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(self, dataset_name: str, config: str, version: Union[Version, str], cache_dir: Optional[str] = None, use_local_dummy_data: bool = False, load_existing_dummy_data: bool = True, download_callbacks: Optional[List[Callable]] = None, ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join('dummy', self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join('dummy', self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, 'dummy_data.zip')

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True)
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, '/'))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, '/').split('/')[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall('[0-9]{3,}-of-[0-9]{3,}', url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith('https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed') for url in data_url)
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split('/')[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split('/')[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
                for member in members:
                    if member.startswith(relative_path.as_posix()):
                        yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob('*')
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith(('.', '__')):
                yield file_path.relative_to(path).as_posix(), file_path.open('rb')

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith(('.', '__')):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith(('.', '__')):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith(('.', '__')):
                            continue
                        yield os.path.join(dirpath, filename)
| 128
| 0
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 592
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 592
| 1
|
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs, ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens')

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self.extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.', FutureWarning, )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
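# Example of the sentinel helpers above (an addition; actual token ids depend on
# the checkpoint, and get_sentinel_tokens builds a set, so ordering is not
# guaranteed):
#   tok = T5TokenizerFast.from_pretrained("t5-small")
#   tok.get_sentinel_tokens()[:2]     # e.g. ['<extra_id_0>', '<extra_id_1>']
#   tok.get_sentinel_token_ids()[:2]  # the corresponding vocabulary ids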
| 89
|
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 89
| 1
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)."""
    with open(dataset_path, encoding='utf_8') as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len) , dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2) , dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.int64)
        mc_labels = np.zeros((n_batch,) , dtype=np.int64)
        for i, (story, cont_a, cont_b, mc_label) in enumerate(dataset):
            with_cont_a = [start_token] + story[:cap_length] + [delimiter_token] + cont_a[:cap_length] + [clf_token]
            with_cont_b = [start_token] + story[:cap_length] + [delimiter_token] + cont_b[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont_a)] = with_cont_a
            input_ids[i, 1, : len(with_cont_b)] = with_cont_b
            mc_token_ids[i, 0] = len(with_cont_a) - 1
            mc_token_ids[i, 1] = len(with_cont_b) - 1
            lm_labels[i, 0, : len(with_cont_a)] = with_cont_a
            lm_labels[i, 1, : len(with_cont_b)] = with_cont_b
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
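def _demo_pre_process_shapes():
    # A sketch with hypothetical token ids of the layout produced above: one
    # row per story, with the two candidate endings stacked along axis 1.
    encoded = [[([1, 2, 3], [4, 5], [6], 0)]]
    ids, mc_tok, lm, mc = pre_process_datasets(encoded, 10, 8, 90, 91, 92)[0]
    assert ids.shape == (1, 2, 10) and mc_tok.shape == (1, 2)
    assert lm.shape == (1, 2, 10) and mc.shape == (1,)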
def main():
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=_UpperCAmelCase , default='openai-gpt' , help='pretrained model name')
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.')
parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.')
parser.add_argument(
'--output_dir' , default=_UpperCAmelCase , type=_UpperCAmelCase , required=_UpperCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument('--train_dataset' , type=_UpperCAmelCase , default='')
parser.add_argument('--eval_dataset' , type=_UpperCAmelCase , default='')
parser.add_argument('--seed' , type=_UpperCAmelCase , default=42)
parser.add_argument('--num_train_epochs' , type=_UpperCAmelCase , default=3)
parser.add_argument('--train_batch_size' , type=_UpperCAmelCase , default=8)
parser.add_argument('--eval_batch_size' , type=_UpperCAmelCase , default=16)
parser.add_argument('--adam_epsilon' , default=1e-8 , type=_UpperCAmelCase , help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm' , type=_UpperCAmelCase , default=1)
parser.add_argument(
'--max_steps' , default=-1 , type=_UpperCAmelCase , help=(
'If > 0: set total number of training steps to perform. Override num_train_epochs.'
) , )
parser.add_argument(
'--gradient_accumulation_steps' , type=_UpperCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--learning_rate' , type=_UpperCAmelCase , default=6.25e-5)
parser.add_argument('--warmup_steps' , default=0 , type=_UpperCAmelCase , help='Linear warmup over warmup_steps.')
parser.add_argument('--lr_schedule' , type=_UpperCAmelCase , default='warmup_linear')
parser.add_argument('--weight_decay' , type=_UpperCAmelCase , default=0.01)
parser.add_argument('--lm_coef' , type=_UpperCAmelCase , default=0.9)
parser.add_argument('--n_valid' , type=_UpperCAmelCase , default=374)
parser.add_argument('--server_ip' , type=_UpperCAmelCase , default='' , help='Can be used for distant debugging.')
parser.add_argument('--server_port' , type=_UpperCAmelCase , default='' , help='Can be used for distant debugging.')
SCREAMING_SNAKE_CASE = parser.parse_args()
print(_UpperCAmelCase)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach')
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCAmelCase)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
SCREAMING_SNAKE_CASE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
SCREAMING_SNAKE_CASE = torch.cuda.device_count()
logger.info('device: {}, n_gpu {}'.format(_UpperCAmelCase , _UpperCAmelCase))
if not args.do_train and not args.do_eval:
raise ValueError('At least one of `do_train` or `do_eval` must be True.')
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model.
# These loading functions also add new tokens and embeddings, called `special tokens`.
# The new embeddings will be fine-tuned on the RocStories dataset.
SCREAMING_SNAKE_CASE = ['_start_', '_delimiter_', '_classify_']
SCREAMING_SNAKE_CASE = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(_UpperCAmelCase))
model.to(_UpperCAmelCase)
# Load and encode the datasets
def tokenize_and_encode(_UpperCAmelCase):
if isinstance(_UpperCAmelCase , str):
    return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(_UpperCAmelCase))
elif isinstance(_UpperCAmelCase , int):
return obj
return [tokenize_and_encode(_UpperCAmelCase) for o in obj]
logger.info('Encoding dataset...')
SCREAMING_SNAKE_CASE = load_rocstories_dataset(args.train_dataset)
SCREAMING_SNAKE_CASE = load_rocstories_dataset(args.eval_dataset)
SCREAMING_SNAKE_CASE = (train_dataset, eval_dataset)
SCREAMING_SNAKE_CASE = tokenize_and_encode(_UpperCAmelCase)
# Compute the max input length for the Transformer
SCREAMING_SNAKE_CASE = model.config.n_positions // 2 - 2
SCREAMING_SNAKE_CASE = max(
    len(story[:max_length]) + max(len(cont_a[:max_length]) , len(cont_b[:max_length])) + 3
    for dataset in encoded_datasets
    for story, cont_a, cont_b, _ in dataset)
SCREAMING_SNAKE_CASE = min(_UpperCAmelCase , model.config.n_positions) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
SCREAMING_SNAKE_CASE = pre_process_datasets(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = tensor_datasets[0], tensor_datasets[1]
SCREAMING_SNAKE_CASE = TensorDataset(*_UpperCAmelCase)
SCREAMING_SNAKE_CASE = RandomSampler(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=args.train_batch_size)
SCREAMING_SNAKE_CASE = TensorDataset(*_UpperCAmelCase)
SCREAMING_SNAKE_CASE = SequentialSampler(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = DataLoader(_UpperCAmelCase , sampler=_UpperCAmelCase , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
SCREAMING_SNAKE_CASE = args.max_steps
SCREAMING_SNAKE_CASE = args.max_steps // (len(_UpperCAmelCase) // args.gradient_accumulation_steps) + 1
else:
SCREAMING_SNAKE_CASE = len(_UpperCAmelCase) // args.gradient_accumulation_steps * args.num_train_epochs
SCREAMING_SNAKE_CASE = list(model.named_parameters())
SCREAMING_SNAKE_CASE = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
SCREAMING_SNAKE_CASE = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
]
SCREAMING_SNAKE_CASE = AdamW(_UpperCAmelCase , lr=args.learning_rate , eps=args.adam_epsilon)
SCREAMING_SNAKE_CASE = get_linear_schedule_with_warmup(
_UpperCAmelCase , num_warmup_steps=args.warmup_steps , num_training_steps=_UpperCAmelCase)
if args.do_train:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc='Epoch'):
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = 0
SCREAMING_SNAKE_CASE = tqdm(_UpperCAmelCase , desc='Training')
for step, batch in enumerate(_UpperCAmelCase):
SCREAMING_SNAKE_CASE = tuple(t.to(_UpperCAmelCase) for t in batch)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch
SCREAMING_SNAKE_CASE = model(_UpperCAmelCase , mc_token_ids=_UpperCAmelCase , lm_labels=_UpperCAmelCase , mc_labels=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
SCREAMING_SNAKE_CASE = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
SCREAMING_SNAKE_CASE = 'Training loss: {:.2e} lr: {:.2e}'.format(_UpperCAmelCase , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
SCREAMING_SNAKE_CASE = model.module if hasattr(_UpperCAmelCase , 'module') else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , _UpperCAmelCase)
SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , _UpperCAmelCase)
torch.save(model_to_save.state_dict() , _UpperCAmelCase)
model_to_save.config.to_json_file(_UpperCAmelCase)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
SCREAMING_SNAKE_CASE = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
SCREAMING_SNAKE_CASE = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(_UpperCAmelCase)
if args.do_eval:
model.eval()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 0
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = 0, 0
for batch in tqdm(_UpperCAmelCase , desc='Evaluating'):
SCREAMING_SNAKE_CASE = tuple(t.to(_UpperCAmelCase) for t in batch)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = batch
with torch.no_grad():
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = model(
_UpperCAmelCase , mc_token_ids=_UpperCAmelCase , lm_labels=_UpperCAmelCase , mc_labels=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = mc_logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE = mc_labels.to('cpu').numpy()
SCREAMING_SNAKE_CASE = accuracy(_UpperCAmelCase , _UpperCAmelCase)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
SCREAMING_SNAKE_CASE = eval_loss / nb_eval_steps
SCREAMING_SNAKE_CASE = eval_accuracy / nb_eval_examples
SCREAMING_SNAKE_CASE = tr_loss / nb_tr_steps if args.do_train else None
SCREAMING_SNAKE_CASE = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
SCREAMING_SNAKE_CASE = os.path.join(args.output_dir , 'eval_results.txt')
with open(_UpperCAmelCase , 'w') as writer:
logger.info('***** Eval results *****')
for key in sorted(result.keys()):
logger.info(' %s = %s' , _UpperCAmelCase , str(result[key]))
writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
main()
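# Example invocation (illustrative; assumes this script is saved as
# run_openai_gpt.py, and uses only the flags defined above):
#   python run_openai_gpt.py --do_train --do_eval \
#       --model_name openai-gpt \
#       --train_dataset train.csv --eval_dataset val.csv \
#       --output_dir runs/rocstories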
| 444
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def squared_euclidean_distance(a , b):
    bT = b.T
    aa = np.sum(np.square(a) , axis=1)
    ba = np.sum(np.square(bT) , axis=0)
    ab = np.matmul(a , bT)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d
def color_quantize(x , clusters):
    x = x.reshape(-1 , 3)
    d = squared_euclidean_distance(x , clusters)
    return np.argmin(d , axis=1)
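def _demo_color_quantize():
    # Sanity sketch: with clusters at pure black and pure white, a dark pixel
    # and a bright pixel map to cluster ids 0 and 1 respectively.
    clusters = np.array([[0, 0, 0], [255, 255, 255]], dtype=np.float32)
    pixels = np.array([[[10, 10, 10], [250, 250, 250]]], dtype=np.float32)
    assert color_quantize(pixels, clusters).tolist() == [0, 1]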
class ImageGPTImageProcessor ( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
def __init__( self , clusters = None , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_normalize = True , do_color_quantize = True , **kwargs , ) -> None:
    super().__init__(**kwargs)
    size = size if size is not None else {'height': 256, 'width': 256}
    size = get_size_dict(size)
    self.clusters = np.array(clusters) if clusters is not None else None
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_normalize = do_normalize
    self.do_color_quantize = do_color_quantize
def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(f'''Size dictionary must contain both height and width keys. Got {size.keys()}''')
    return resize(
        image , size=(size['height'], size['width']) , resample=resample , data_format=data_format , **kwargs)
def normalize( self , image , data_format = None , ) -> np.ndarray:
    image = rescale(image=image , scale=1 / 127.5 , data_format=data_format)
    image = image - 1
    return image
def preprocess( self , images , do_resize = None , size = None , resample = None , do_normalize = None , do_color_quantize = None , clusters = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size)
    resample = resample if resample is not None else self.resample
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
    clusters = clusters if clusters is not None else self.clusters
    clusters = np.array(clusters) if clusters is not None else None
    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
            'torch.Tensor, tf.Tensor or jax.ndarray.')
    if do_resize and (size is None or resample is None):
        raise ValueError('Size and resample must be specified if do_resize is True.')
    if do_color_quantize and clusters is None:
        raise ValueError('Clusters must be specified if do_color_quantize is True.')
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    if do_resize:
        images = [self.resize(image=image , size=size , resample=resample) for image in images]
    if do_normalize:
        images = [self.normalize(image=image) for image in images]
    if do_color_quantize:
        images = [to_channel_dimension_format(image , ChannelDimension.LAST) for image in images]
        # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
        images = np.array(images)
        images = color_quantize(images , clusters).reshape(images.shape[:-1])
        # flatten to (batch_size, height*width)
        batch_size = images.shape[0]
        images = images.reshape(batch_size , -1)
        # We need to convert back to a list of images to keep consistent behaviour across processors.
        images = list(images)
    else:
        images = [to_channel_dimension_format(image , data_format) for image in images]
    data = {'input_ids': images}
    return BatchFeature(data=data , tensor_type=return_tensors)
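def _demo_image_gpt_processor():
    # Usage sketch with a hypothetical 4-color palette (real checkpoints ship
    # their own `clusters`): one RGB image becomes a flat sequence of ids.
    palette = np.random.RandomState(0).randint(0, 256, size=(4, 3))
    processor = ImageGPTImageProcessor(clusters=palette, size={"height": 8, "width": 8})
    image = np.random.RandomState(1).randint(0, 256, size=(8, 8, 3), dtype=np.uint8)
    batch = processor(image)
    assert np.asarray(batch["input_ids"]).shape == (1, 64)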
| 444
| 1
|
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs ) -> int:
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                '''Sharding is ambiguous for this dataset: '''
                + '''we found several data sources lists of different lengths, and we don\'t know over which list we should parallelize:\n'''
                + '''\n'''.join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items() )
                + '''\nTo fix this, check the \'gen_kwargs\' and make sure to use lists only for data sources, '''
                + '''and use tuples otherwise. In the end there should only be one single list, or several lists with the same length.'''
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards(num_shards , max_num_jobs ) -> List[range]:
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shards_indices_per_group.append(range(start , start + num_shards_to_add ) )
    return shards_indices_per_group
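def _demo_distribute_shards():
    # Sketch: 5 shards over 2 jobs -> the first group absorbs the remainder.
    groups = _distribute_shards(num_shards=5, max_num_jobs=2)
    assert [list(g) for g in groups] == [[0, 1, 2], [3, 4]]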
def _split_gen_kwargs(gen_kwargs , max_num_jobs ) -> List[dict]:
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
def _merge_gen_kwargs(gen_kwargs_list ) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng , gen_kwargs ) -> dict:
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs
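def _demo_split_and_merge():
    # Sketch: splitting list-valued kwargs over 2 jobs and merging them back
    # is lossless; tuple values are treated as scalars and simply repeated.
    kwargs = {"files": ["a", "b", "c"], "mode": ("fast",)}
    parts = _split_gen_kwargs(kwargs, max_num_jobs=2)
    assert parts == [{"files": ["a", "b"], "mode": ("fast",)},
                     {"files": ["c"], "mode": ("fast",)}]
    assert _merge_gen_kwargs(parts) == kwargs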
| 487
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result( ):
    '''simple docstring'''
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected ) == sorted(result )
| 240
| 0
|
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor ) -> List[int]:
    """simple docstring"""
    if isinstance(tensor ,np.ndarray ):
        return list(tensor.shape )
    dynamic = tf.shape(tensor )
    if tensor.shape == tf.TensorShape(None ):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static )]
def stable_softmax(logits ,axis = None ,name = None ) -> tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1e-9 ,axis=snake_case__ ,name=snake_case__ )
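def _demo_stable_softmax():
    # Sketch: the tiny epsilon added above leaves results numerically equal
    # to tf.nn.softmax for ordinary inputs; the wrapper reportedly exists to
    # work around a TF/XLA masking bug, not to change values.
    logits = tf.constant([[1.0, 2.0, 3.0]])
    tf.debugging.assert_near(stable_softmax(logits, axis=-1), tf.nn.softmax(logits, axis=-1))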
def functional_layernorm(inputs ,weight ,bias ,epsilon=1e-5 ,axis=-1 ):
    """simple docstring"""
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis ,int ):
        raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs ,axes=[axis] ,keepdims=True )
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs )[axis]
        weight = tf.reshape(weight ,shape )
        bias = tf.reshape(bias ,shape )
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs ,mean ,variance ,offset=bias ,scale=weight ,variance_epsilon=epsilon ,)
    return outputs
def flatten(input ,start_dim=0 ,end_dim=-1 ):
    """simple docstring"""
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input )
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] ,axis=0 )
    return tf.reshape(input ,out_shape )
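def _demo_flatten():
    # Sketch: flattening dims 0..1 of a (2, 3, 4) tensor gives (6, 4),
    # mirroring torch.flatten semantics.
    x = tf.zeros((2, 3, 4))
    assert shape_list(flatten(x, start_dim=0, end_dim=1)) == [6, 4]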
def invert_attention_mask(encoder_attention_mask ) -> tf.Tensor:
    """simple docstring"""
    if not isinstance(encoder_attention_mask ,tf.Tensor ):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
_SCREAMING_SNAKE_CASE = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
_SCREAMING_SNAKE_CASE = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
_SCREAMING_SNAKE_CASE = (
tf.cast(1 ,encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__ = "input_ids" ) -> None:
"""simple docstring"""
tf.debugging.assert_less(
snake_case__ ,tf.cast(snake_case__ ,dtype=tensor.dtype ) ,message=(
F'The maximum value of {tensor_name} ({tf.math.reduce_max(snake_case__ )}) must be smaller than the embedding '
F'layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.'
) ,)
def save_attributes_to_hdf5_group(group ,name ,data ):
    """simple docstring"""
    HDF5_OBJECT_HEADER_LIMIT = 64512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x ) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            """The following attributes cannot be saved to HDF5 file because """
            f'they are larger than {HDF5_OBJECT_HEADER_LIMIT} '
            f'bytes: {bad_attributes}' )
    data_npy = np.asarray(data )
    num_chunks = 1
    chunked_data = np.array_split(data_npy ,num_chunks )
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
        num_chunks += 1
        chunked_data = np.array_split(data_npy ,num_chunks )
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data ):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
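def _demo_attribute_chunking():
    # Sketch of the chunking rule above with a toy 16-byte limit: ten 4-byte
    # entries need 3 chunks before every chunk fits.
    limit = 16
    data = np.asarray([b"abcd"] * 10)
    num_chunks = 1
    chunks = np.array_split(data, num_chunks)
    while any(x.nbytes > limit for x in chunks):
        num_chunks += 1
        chunks = np.array_split(data, num_chunks)
    assert num_chunks == 3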
def load_attributes_from_hdf5_group(group ,name ):
    """simple docstring"""
    if name in group.attrs:
        data = [n.decode("""utf8""" ) if hasattr(n ,"""decode""" ) else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("""utf8""" ) if hasattr(n ,"""decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
            chunk_id += 1
    return data
def expand_1d(data ):
    """simple docstring"""
    def _expand_single_ad_tensor(t ):
        if isinstance(t ,tf.Tensor ) and t.shape.rank == 1:
            return tf.expand_dims(t ,axis=-1 )
        return t
    return tf.nest.map_structure(_expand_single_ad_tensor ,data )
| 707
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , apply_ocr=True , ):
    '''simple docstring'''
    size = size if size is not None else {"""height""": 18, """width""": 18}
    self.parent = parent
    self.batch_size = batch_size
    self.num_channels = num_channels
    self.image_size = image_size
    self.min_resolution = min_resolution
    self.max_resolution = max_resolution
    self.do_resize = do_resize
    self.size = size
    self.apply_ocr = apply_ocr
def prepare_image_processor_dict( self ):
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin ,unittest.TestCase ):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def setUp( self ):
    '''simple docstring'''
    self.image_processor_tester = LayoutLMvaImageProcessingTester(self )
@property
def image_processor_dict( self ):
    '''simple docstring'''
    return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ , """do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """size""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ , """apply_ocr""" ) )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
_SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCamelCase ( self: int ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , Image.Image )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , UpperCAmelCase_ )
self.assertIsInstance(encoding.boxes , UpperCAmelCase_ )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , np.ndarray )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_SCREAMING_SNAKE_CASE = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase_ , torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ , torch.Tensor )
# Test not batched input
_SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor()
from datasets import load_dataset
_SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
_SCREAMING_SNAKE_CASE = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_SCREAMING_SNAKE_CASE = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
_SCREAMING_SNAKE_CASE = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase_ )
self.assertListEqual(encoding.boxes , UpperCAmelCase_ )
# with apply_OCR = False
_SCREAMING_SNAKE_CASE = LayoutLMvaImageProcessor(apply_ocr=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = image_processing(UpperCAmelCase_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 569
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
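# Example (illustrative): floats_list((2, 3)) returns a 2-element list with 3
# pseudo-random floats each, drawn from `global_rng` and multiplied by `scale`.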
class ASTFeatureExtractionTester(unittest.TestCase ):
'''simple docstring'''
def __init__( self , parent , batch_size=7 , min_seq_length=4_00 , max_seq_length=20_00 , feature_size=1 , padding_value=0.0 , sampling_rate=1_60_00 , return_attention_mask=True , do_normalize=True , ) -> None:
    '''simple docstring'''
    self.parent = parent
    self.batch_size = batch_size
    self.min_seq_length = min_seq_length
    self.max_seq_length = max_seq_length
    self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
    self.feature_size = feature_size
    self.padding_value = padding_value
    self.sampling_rate = sampling_rate
    self.return_attention_mask = return_attention_mask
    self.do_normalize = do_normalize
def prepare_feat_extract_dict( self ) -> dict:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
    '''simple docstring'''
    def _flatten(list_of_lists ):
        return list(itertools.chain(*list_of_lists ) )
    if equal_length:
        speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
    else:
        # make sure that inputs increase in size
        speech_inputs = [
            _flatten(floats_list((x, self.feature_size) ) )
            for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
        ]
    if numpify:
        speech_inputs = [np.asarray(x ) for x in speech_inputs]
    return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = ASTFeatureExtractor
    def setUp( self ) -> None:
        '''simple docstring'''
        self.feat_extract_tester = ASTFeatureExtractionTester(self )
def __lowerCAmelCase ( self :List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
SCREAMING_SNAKE_CASE : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
SCREAMING_SNAKE_CASE : Union[str, Any] = [np.asarray(lowerCamelCase_ ) for speech_input in speech_inputs]
# Test not batched input
SCREAMING_SNAKE_CASE : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE : Tuple = feat_extract(np_speech_inputs[0] , return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test batched
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE : List[str] = feat_extract(lowerCamelCase_ , padding=lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
SCREAMING_SNAKE_CASE : Union[str, Any] = np.asarray(lowerCamelCase_ )
SCREAMING_SNAKE_CASE : List[str] = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values
SCREAMING_SNAKE_CASE : List[str] = feat_extract(lowerCamelCase_ , return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase_ , lowerCamelCase_ ):
self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) )
@require_torch
def __lowerCAmelCase ( self :int ) -> Union[str, Any]:
'''simple docstring'''
import torch
feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
np_speech_inputs = np.random.rand(1_00 ).astype(np.float32 )
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
    np_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''np''' )
    self.assertTrue(np_processed.input_values.dtype == np.float32 )
    pt_processed = feature_extractor.pad([{'''input_values''': inputs}] , return_tensors='''pt''' )
    self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
def _load_datasamples( self , num_samples ):
    '''simple docstring'''
    from datasets import load_dataset
    ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
    # automatic decoding with librispeech
    speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
    return [x["array"] for x in speech_samples]
@require_torch
def __lowerCAmelCase ( self :Union[str, Any] ) -> Tuple:
'''simple docstring'''
# fmt: off
EXPECTED_INPUT_VALUES = torch.tensor(
    [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
     -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
     -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
     -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869] )
# fmt: on
input_speech = self._load_datasamples(1 )
feature_extractor = ASTFeatureExtractor()
input_values = feature_extractor(input_speech , return_tensors='''pt''' ).input_values
self.assertEqual(input_values.shape , (1, 10_24, 1_28) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , EXPECTED_INPUT_VALUES , atol=1E-4 ) )
| 698
|
"""simple docstring"""
# using dfs for finding eulerian path traversal
def dfs(u , graph , visited_edge , path=None )-> list:
    '''simple docstring'''
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v , graph , visited_edge , path )
    return path
def check_circuit_or_path(graph , max_node )-> tuple:
    '''simple docstring'''
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node ):
        if i not in graph.keys():
            continue
        if len(graph[i] ) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph , max_node )-> None:
    '''simple docstring'''
    visited_edge = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
    check, odd_node = check_circuit_or_path(graph , max_node )
    if check == 3:
        print('''graph is not Eulerian''' )
        print('''no path''' )
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('''graph has a Euler path''' )
    if check == 1:
        print('''graph has a Euler cycle''' )
    path = dfs(start_node , graph , visited_edge )
    print(path )
def main()-> None:
    '''simple docstring'''
    G1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    G2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    G3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    G4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    G5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(G1 , max_node )
    check_euler(G2 , max_node )
    check_euler(G3 , max_node )
    check_euler(G4 , max_node )
    check_euler(G5 , max_node )
if __name__ == "__main__":
    main()
| 698
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = """▁"""
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model""",
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/xglm-564M""": 20_48,
}
class XGLMTokenizer ( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self : int , vocab_file : str , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , sp_model_kwargs : Optional[Dict[str, Any]] = None , **kwargs , ) -> None:
    '''simple docstring'''
    self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
    # Compatibility with the original tokenizer
    self.num_madeup_words = 7
    madeup_words = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
    kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens" , [] ) or []
    kwargs["additional_special_tokens"] += [
        word for word in madeup_words if word not in kwargs["additional_special_tokens"]
    ]
    super().__init__(
        bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(str(vocab_file ) )
    self.vocab_file = vocab_file
    # Original fairseq vocab and spm vocab must be "aligned":
    # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
    # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
    # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
    # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
    # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
    self.fairseq_offset = 1
    # Mimic fairseq token-to-id alignment for the first 4 token
    self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    sp_size = len(self.sp_model )
    madeup_words_to_ids = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
    self.fairseq_tokens_to_ids.update(madeup_words_to_ids )
    self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Dict ) -> Optional[Any]:
'''simple docstring'''
state = self.__dict__.copy()
state["sp_model"] = None
state["sp_model_proto"] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : List[Any] , d : Dict ) -> Any:
    '''simple docstring'''
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self , "sp_model_kwargs" ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def build_inputs_with_special_tokens( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
    '''simple docstring'''
    if token_ids_1 is None:
        return [self.sep_token_id] + token_ids_0
    sep = [self.sep_token_id]
    return sep + token_ids_0 + sep + sep + token_ids_1
def get_special_tokens_mask( self : Any , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
    '''simple docstring'''
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0 ))
    return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 ))
def create_token_type_ids_from_sequences( self : str , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ) -> List[int]:
    '''simple docstring'''
    sep = [self.sep_token_id]
    if token_ids_1 is None:
        return len(sep + token_ids_0 ) * [0]
    return len(sep + token_ids_0 + sep + sep + token_ids_1 ) * [0]
@property
def vocab_size( self : Any ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def get_vocab( self : Optional[Any] ) -> Dict:
    '''simple docstring'''
    vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _tokenize( self : Union[str, Any] , text : str ) -> List[str]:
    '''simple docstring'''
    return self.sp_model.encode(text , out_type=str )
def _convert_token_to_id( self : List[Any] , token : str ) -> int:
    '''simple docstring'''
    if token in self.fairseq_tokens_to_ids:
        return self.fairseq_tokens_to_ids[token]
    spm_id = self.sp_model.PieceToId(token )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _convert_id_to_token( self : Tuple , index : int ) -> str:
    '''simple docstring'''
    if index in self.fairseq_ids_to_tokens:
        return self.fairseq_ids_to_tokens[index]
    return self.sp_model.IdToPiece(index - self.fairseq_offset )
def convert_tokens_to_string( self : Tuple , tokens : List[str] ) -> str:
    '''simple docstring'''
    out_string = "".join(tokens ).replace(SPIECE_UNDERLINE , " " ).strip()
    return out_string
def save_vocabulary( self : List[Any] , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
    '''simple docstring'''
    if not os.path.isdir(save_directory ):
        logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
        return
    out_vocab_file = os.path.join(
        save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
    if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
        copyfile(self.vocab_file , out_vocab_file )
    elif not os.path.isfile(self.vocab_file ):
        with open(out_vocab_file , "wb" ) as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model )
    return (out_vocab_file,)
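# Illustrative mapping implied by the alignment table above (assuming the
# released SentencePiece model): spm id 3 (",") maps to fairseq id
# 3 + fairseq_offset == 4, while "<s>"/"<pad>"/"</s>"/"<unk>" resolve through
# fairseq_tokens_to_ids to 0/1/2/3 regardless of their spm positions.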
| 187
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder ( ModelMixin , ConfigMixin ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[Any] , input_dims : int = 128 , targets_length : int = 256 , max_decoder_noise_time : float = 2000.0 , d_model : int = 768 , num_layers : int = 12 , num_heads : int = 12 , d_kv : int = 64 , d_ff : int = 2048 , dropout_rate : float = 0.1 , ) -> None:
    '''simple docstring'''
    super().__init__()
    self.conditioning_emb = nn.Sequential(
        nn.Linear(d_model , d_model * 4 , bias=False ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=False ) , nn.SiLU() , )
    self.position_encoding = nn.Embedding(targets_length , d_model )
    self.position_encoding.weight.requires_grad = False
    self.continuous_inputs_projection = nn.Linear(input_dims , d_model , bias=False )
    self.dropout = nn.Dropout(p=dropout_rate )
    self.decoders = nn.ModuleList()
    for lyr_num in range(num_layers ):
        # FiLM conditional T5 decoder
        lyr = DecoderLayer(d_model=d_model , d_kv=d_kv , num_heads=num_heads , d_ff=d_ff , dropout_rate=dropout_rate )
        self.decoders.append(lyr )
    self.decoder_norm = TaLayerNorm(d_model )
    self.post_dropout = nn.Dropout(p=dropout_rate )
    self.spec_out = nn.Linear(d_model , input_dims , bias=False )
def encoder_decoder_mask( self : Dict , query_input : torch.Tensor , key_input : torch.Tensor ) -> torch.Tensor:
    '''simple docstring'''
    mask = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
    return mask.unsqueeze(-3 )
def forward( self : Tuple , encodings_and_masks , decoder_input_tokens , decoder_noise_time ):
    '''simple docstring'''
    batch, _, _ = decoder_input_tokens.shape
    assert decoder_noise_time.shape == (batch,)
    # decoder_noise_time is in [0, 1), so rescale to expected timing range.
    time_steps = get_timestep_embedding(
        decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
    conditioning_emb = self.conditioning_emb(time_steps ).unsqueeze(1 )
    assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
    seq_length = decoder_input_tokens.shape[1]
    # If we want to use relative positions for audio context, we can just offset
    # this sequence by the length of encodings_and_masks.
    decoder_positions = torch.broadcast_to(
        torch.arange(seq_length , device=decoder_input_tokens.device ) , (batch, seq_length) , )
    position_encodings = self.position_encoding(decoder_positions )
    inputs = self.continuous_inputs_projection(decoder_input_tokens )
    inputs += position_encodings
    y = self.dropout(inputs )
    # decoder: No padding present.
    decoder_mask = torch.ones(
        decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
    # Translate encoding masks to encoder-decoder masks.
    encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask , y )) for x, y in encodings_and_masks]
    # cross attend style: concat encodings
    encoded = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
    encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
    for lyr in self.decoders:
        y = lyr(
            y , conditioning_emb=conditioning_emb , encoder_hidden_states=encoded , encoder_attention_mask=encoder_decoder_mask , )[0]
    y = self.decoder_norm(y )
    y = self.post_dropout(y )
    spec_out = self.spec_out(y )
    return spec_out
class DecoderLayer ( nn.Module ):
'''simple docstring'''
def __init__( self , d_model , d_kv , num_heads , d_ff , dropout_rate , layer_norm_epsilon=1E-6 ) -> None:
    '''simple docstring'''
    super().__init__()
    self.layer = nn.ModuleList()
    # cond self attention: layer 0
    self.layer.append(
        TaLayerSelfAttentionCond(d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate ) )
    # cross attention: layer 1
    self.layer.append(
        TaLayerCrossAttention(
            d_model=d_model , d_kv=d_kv , num_heads=num_heads , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon , ) )
    # Film Cond MLP + dropout: last layer
    self.layer.append(
        TaLayerFFCond(d_model=d_model , d_ff=d_ff , dropout_rate=dropout_rate , layer_norm_epsilon=layer_norm_epsilon ) )
def a__ ( self : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Optional[int]=None , __lowerCamelCase : int=None , __lowerCamelCase : List[Any]=None , __lowerCamelCase : List[str]=None , ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = self.layer[0](
__lowerCamelCase , conditioning_emb=__lowerCamelCase , attention_mask=__lowerCamelCase , )
if encoder_hidden_states is not None:
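# Turn the 0/1 encoder mask into an additive attention bias: 0 where attention is allowed, a large negative value where it is masked.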
lowerCamelCase__ = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
lowerCamelCase__ = self.layer[1](
__lowerCamelCase , key_value_states=__lowerCamelCase , attention_mask=__lowerCamelCase , )
# Apply the FiLM conditional feed-forward layer
lowerCamelCase__ = self.layer[-1](__lowerCamelCase , __lowerCamelCase )
return (hidden_states,)
class lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase__ = TaLayerNorm(__lowerCamelCase )
lowerCamelCase__ = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
lowerCamelCase__ = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
lowerCamelCase__ = nn.Dropout(__lowerCamelCase )
def a__ ( self : Dict , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : str=None , ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
lowerCamelCase__ = self.FiLMLayer(__lowerCamelCase , __lowerCamelCase )
# Self-attention block
lowerCamelCase__ = self.attention(__lowerCamelCase )
lowerCamelCase__ = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : str , __lowerCamelCase : Any , __lowerCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowerCamelCase__ = Attention(query_dim=__lowerCamelCase , heads=__lowerCamelCase , dim_head=__lowerCamelCase , out_bias=__lowerCamelCase , scale_qk=__lowerCamelCase )
lowerCamelCase__ = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
lowerCamelCase__ = nn.Dropout(__lowerCamelCase )
def a__ ( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : Dict=None , ) -> Any:
'''simple docstring'''
lowerCamelCase__ = self.layer_norm(__lowerCamelCase )
lowerCamelCase__ = self.attention(
__lowerCamelCase , encoder_hidden_states=__lowerCamelCase , attention_mask=attention_mask.squeeze(1 ) , )
lowerCamelCase__ = hidden_states + self.dropout(__lowerCamelCase )
return layer_output
class lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : int , __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
lowerCamelCase__ = TaDenseGatedActDense(d_model=__lowerCamelCase , d_ff=__lowerCamelCase , dropout_rate=__lowerCamelCase )
lowerCamelCase__ = TaFiLMLayer(in_features=d_model * 4 , out_features=__lowerCamelCase )
lowerCamelCase__ = TaLayerNorm(__lowerCamelCase , eps=__lowerCamelCase )
lowerCamelCase__ = nn.Dropout(__lowerCamelCase )
def a__ ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any]=None ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = self.layer_norm(__lowerCamelCase )
if conditioning_emb is not None:
lowerCamelCase__ = self.film(__lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ = self.DenseReluDense(__lowerCamelCase )
lowerCamelCase__ = hidden_states + self.dropout(__lowerCamelCase )
return hidden_states
class lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : Dict , __lowerCamelCase : str ) -> str:
'''simple docstring'''
super().__init__()
lowerCamelCase__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
lowerCamelCase__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
lowerCamelCase__ = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
lowerCamelCase__ = nn.Dropout(__lowerCamelCase )
lowerCamelCase__ = NewGELUActivation()
def a__ ( self : Dict , __lowerCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
# Gated feed-forward: the GELU branch and the linear branch each use their own input projection.
lowerCamelCase__ = self.act(self.wi_0(__lowerCamelCase ) )
lowerCamelCase__ = self.wi_1(__lowerCamelCase )
lowerCamelCase__ = hidden_gelu * hidden_linear
lowerCamelCase__ = self.dropout(__lowerCamelCase )
lowerCamelCase__ = self.wo(__lowerCamelCase )
return hidden_states
class lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : str , __lowerCamelCase : Tuple , __lowerCamelCase : List[Any]=1E-6 ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
lowerCamelCase__ = nn.Parameter(torch.ones(__lowerCamelCase ) )
lowerCamelCase__ = eps
def a__ ( self : Optional[int] , __lowerCamelCase : str ) -> int:
'''simple docstring'''
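# T5-style RMSNorm: rescale by the root mean square of the activations; unlike standard LayerNorm there is no mean subtraction and no bias.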
lowerCamelCase__ = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__lowerCamelCase )
lowerCamelCase__ = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowerCamelCase__ = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class lowercase ( nn.Module ):
'''simple docstring'''
def a__ ( self : Tuple , __lowerCamelCase : torch.Tensor ) -> torch.Tensor:
'''simple docstring'''
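# Tanh approximation of GELU: 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x^3))), matching the variant used in Google BERT and GPT-2.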
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.0_4_4_7_1_5 * torch.pow(__lowerCamelCase , 3.0 )) ))
class lowercase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
super().__init__()
lowerCamelCase__ = nn.Linear(__lowerCamelCase , out_features * 2 , bias=__lowerCamelCase )
def a__ ( self : str , __lowerCamelCase : str , __lowerCamelCase : str ) -> List[str]:
'''simple docstring'''
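# FiLM: predict a per-channel (scale, shift) pair from the conditioning embedding and apply x * (1 + scale) + shift.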
lowerCamelCase__ = self.scale_bias(__lowerCamelCase )
lowerCamelCase__ , lowerCamelCase__ = torch.chunk(__lowerCamelCase , 2 , -1 )
lowerCamelCase__ = x * (1 + scale) + shift
return x
| 187
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A__ : Optional[int] = logging.get_logger(__name__)
A__ : str = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : str = 'camembert'
def __init__( self , SCREAMING_SNAKE_CASE_=3_05_22 , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> Any:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = vocab_size
__lowerCamelCase : Optional[int] = hidden_size
__lowerCamelCase : Tuple = num_hidden_layers
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : str = hidden_act
__lowerCamelCase : Dict = intermediate_size
__lowerCamelCase : str = hidden_dropout_prob
__lowerCamelCase : Any = attention_probs_dropout_prob
__lowerCamelCase : Tuple = max_position_embeddings
__lowerCamelCase : Dict = type_vocab_size
__lowerCamelCase : List[str] = initializer_range
__lowerCamelCase : Tuple = layer_norm_eps
__lowerCamelCase : Optional[int] = position_embedding_type
__lowerCamelCase : Optional[Any] = use_cache
__lowerCamelCase : int = classifier_dropout
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
@property
def lowercase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__lowerCamelCase : Dict = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
__lowerCamelCase : str = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 13
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 304
| 0
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__)
class lowercase_ ( enum.Enum ):
_lowerCamelCase = 0
_lowerCamelCase = 1
@add_end_docstrings(__snake_case )
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'generated'
def __init__( self , *lowercase_ , **lowercase_ ):
super().__init__(*lowercase_ , **lowercase_ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , **lowercase_ , ):
_snake_case : List[Any] = {}
if truncation is not None:
_snake_case : Union[str, Any] = truncation
_snake_case : List[Any] = generate_kwargs
_snake_case : str = {}
if return_tensors is not None and return_type is None:
_snake_case : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_snake_case : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
_snake_case : Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_snake_case : str = self.tokenizer.encode(lowercase_ , add_special_tokens=lowercase_ )
if len(lowercase_ ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
_snake_case : List[str] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
return True
def UpperCamelCase ( self , *lowercase_ , lowercase_ ):
_snake_case : str = self.model.config.prefix if self.model.config.prefix is not None else ""
if isinstance(args[0] , lowercase_ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
_snake_case : Dict = ([prefix + arg for arg in args[0]],)
_snake_case : Optional[int] = True
elif isinstance(args[0] , lowercase_ ):
_snake_case : Union[str, Any] = (prefix + args[0],)
_snake_case : Tuple = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_snake_case : Tuple = self.tokenizer(*lowercase_ , padding=lowercase_ , truncation=lowercase_ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self , *lowercase_ , **lowercase_ ):
_snake_case : Optional[Any] = super().__call__(*lowercase_ , **lowercase_ )
if (
isinstance(args[0] , lowercase_ )
and all(isinstance(lowercase_ , lowercase_ ) for el in args[0] )
and all(len(lowercase_ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCamelCase ( self , lowercase_ , lowercase_=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ ):
_snake_case : Dict = self._parse_and_tokenize(lowercase_ , truncation=lowercase_ , **lowercase_ )
return inputs
def UpperCamelCase ( self , lowercase_ , **lowercase_ ):
if self.framework == "pt":
_snake_case : Dict = model_inputs["input_ids"].shape
elif self.framework == "tf":
_snake_case : Optional[int] = tf.shape(model_inputs["input_ids"] ).numpy()
_snake_case : List[str] = generate_kwargs.get("min_length" , self.model.config.min_length )
_snake_case : Union[str, Any] = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(lowercase_ , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
_snake_case : Optional[int] = self.model.generate(**lowercase_ , **lowercase_ )
_snake_case : Tuple = output_ids.shape[0]
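# generate() may return several sequences per input, so regroup the flat batch as (input_batch, num_return_sequences, sequence_length).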
if self.framework == "pt":
_snake_case : int = output_ids.reshape(lowercase_ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_snake_case : str = tf.reshape(lowercase_ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCamelCase ( self , lowercase_ , lowercase_=ReturnType.TEXT , lowercase_=False ):
_snake_case : int = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_snake_case : Union[str, Any] = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_snake_case : Tuple = {
f"""{self.return_name}_text""": self.tokenizer.decode(
lowercase_ , skip_special_tokens=lowercase_ , clean_up_tokenization_spaces=lowercase_ , )
}
records.append(lowercase_ )
return records
@add_end_docstrings(__snake_case )
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'summary'
def __call__( self , *lowercase_ , **lowercase_ ):
return super().__call__(*lowercase_ , **lowercase_ )
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"a summarization task, where outputs shorter than the input are typically wanted, you might "
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(__snake_case )
class lowercase_ ( __snake_case ):
_lowerCamelCase = 'translation'
def UpperCamelCase ( self , lowercase_ , lowercase_ , lowercase_ ):
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"increasing your max_length manually, e.g. translator('...', max_length=400)" )
return True
def UpperCamelCase ( self , *lowercase_ , lowercase_=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_=None , lowercase_=None ):
if getattr(self.tokenizer , "_build_translation_inputs" , lowercase_ ):
return self.tokenizer._build_translation_inputs(
*lowercase_ , return_tensors=self.framework , truncation=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ )
else:
return super()._parse_and_tokenize(*lowercase_ , truncation=lowercase_ )
def UpperCamelCase ( self , lowercase_=None , lowercase_=None , **lowercase_ ):
_snake_case : int = super()._sanitize_parameters(**lowercase_ )
if src_lang is not None:
_snake_case : Dict = src_lang
if tgt_lang is not None:
_snake_case : str = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_snake_case : Optional[int] = kwargs.get("task" , self.task )
_snake_case : Optional[int] = task.split("_" )
if task and len(lowercase_ ) == 4:
# translation, XX, to YY
_snake_case : Optional[int] = items[1]
_snake_case : Any = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self , *lowercase_ , **lowercase_ ):
return super().__call__(*lowercase_ , **lowercase_ )
| 709
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class lowercase_ ( __snake_case ):
def __init__( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=0 ):
_snake_case : Optional[Any] = 1.0 if scale is None else scale
_snake_case : Optional[Any] = 0.0 if loc is None else loc
super().__init__(lowercase_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=lowercase_ )] )
@property
def UpperCamelCase ( self ):
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCamelCase ( self ):
return self.base_dist.variance * self.scale**2
@property
def UpperCamelCase ( self ):
return self.variance.sqrt()
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , **lowercase_ ):
super().__init__(**lowercase_ )
_snake_case : List[Any] = args_dim
_snake_case : Any = nn.ModuleList([nn.Linear(lowercase_ , lowercase_ ) for dim in args_dim.values()] )
_snake_case : List[Any] = domain_map
def UpperCamelCase ( self , lowercase_ ):
_snake_case : int = [proj(lowercase_ ) for proj in self.proj]
return self.domain_map(*lowercase_ )
class lowercase_ ( nn.Module ):
def __init__( self , lowercase_ ):
super().__init__()
_snake_case : Optional[int] = function
def UpperCamelCase ( self , lowercase_ , *lowercase_ ):
return self.function(lowercase_ , *lowercase_ )
class lowercase_ :
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = 42
def __init__( self , lowercase_ = 1 ):
_snake_case : Any = dim
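# For a multivariate output (dim > 1) each distribution argument needs dim copies, so every args_dim entry is scaled by dim.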
_snake_case : Optional[int] = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCamelCase ( self , lowercase_ ):
if self.dim == 1:
return self.distribution_class(*lowercase_ )
else:
return Independent(self.distribution_class(*lowercase_ ) , 1 )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None , ):
_snake_case : Union[str, Any] = self._base_distribution(lowercase_ )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(lowercase_ , loc=lowercase_ , scale=lowercase_ , event_dim=self.event_dim )
@property
def UpperCamelCase ( self ):
return () if self.dim == 1 else (self.dim,)
@property
def UpperCamelCase ( self ):
return len(self.event_shape )
@property
def UpperCamelCase ( self ):
return 0.0
def UpperCamelCase ( self , lowercase_ ):
return ParameterProjection(
in_features=lowercase_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def UpperCamelCase ( self , *lowercase_ ):
raise NotImplementedError()
@staticmethod
def UpperCamelCase ( lowercase_ ):
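# squareplus(x) = (x + sqrt(x^2 + 4)) / 2: a smooth, strictly positive map used like softplus to constrain raw outputs to R+.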
return (x + torch.sqrt(torch.square(lowercase_ ) + 4.0 )) / 2.0
class lowercase_ ( __snake_case ):
_lowerCamelCase = {"df": 1, "loc": 1, "scale": 1}
_lowerCamelCase = StudentT
@classmethod
def UpperCamelCase ( cls , lowercase_ , lowercase_ , lowercase_ ):
_snake_case : int = cls.squareplus(lowercase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
_snake_case : Optional[Any] = 2.0 + cls.squareplus(lowercase_ )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class lowercase_ ( __snake_case ):
_lowerCamelCase = {"loc": 1, "scale": 1}
_lowerCamelCase = Normal
@classmethod
def UpperCamelCase ( cls , lowercase_ , lowercase_ ):
_snake_case : Optional[int] = cls.squareplus(lowercase_ ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class lowercase_ ( __snake_case ):
_lowerCamelCase = {"total_count": 1, "logits": 1}
_lowerCamelCase = NegativeBinomial
@classmethod
def UpperCamelCase ( cls , lowercase_ , lowercase_ ):
_snake_case : Optional[Any] = cls.squareplus(lowercase_ )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def UpperCamelCase ( self , lowercase_ ):
_snake_case , _snake_case = distr_args
if self.dim == 1:
return self.distribution_class(total_count=lowercase_ , logits=lowercase_ )
else:
return Independent(self.distribution_class(total_count=lowercase_ , logits=lowercase_ ) , 1 )
def UpperCamelCase ( self , lowercase_ , lowercase_ = None , lowercase_ = None ):
_snake_case , _snake_case = distr_args
if scale is not None:
# See scaling property of Gamma.
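# A negative binomial with parameters (total_count, logits) has mean total_count * exp(logits), so adding log(scale) to the logits multiplies the mean by scale.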
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 580
| 0
|
from math import log
from scipy.constants import Boltzmann, physical_constants
lowerCAmelCase__ : List[str] = 3_00 # TEMPERATURE (unit = K)
def a__ ( A__, A__, A__, ):
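# Built-in potential of a p-n junction: V_bi = (kT / q) * ln(N_d * N_a / n_i^2); dividing by the electron-volt constant expresses the result in volts.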
if donor_conc <= 0:
raise ValueError('Donor concentration should be positive' )
elif acceptor_conc <= 0:
raise ValueError('Acceptor concentration should be positive' )
elif intrinsic_conc <= 0:
raise ValueError('Intrinsic concentration should be positive' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'Donor concentration should be greater than intrinsic concentration' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'Acceptor concentration should be greater than intrinsic concentration' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101
|
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
'''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a__ = parser.parse_args()
a__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def _lowercase ( lowercase__ ):
__lowerCAmelCase : Union[str, Any] = args.pruning_method
__lowerCAmelCase : List[Any] = args.threshold
__lowerCAmelCase : Union[str, Any] = args.model_name_or_path.rstrip('''/''' )
__lowerCAmelCase : List[str] = args.target_model_path
print(f"""Load fine-pruned model from {model_name_or_path}""" )
__lowerCAmelCase : List[str] = torch.load(os.path.join(lowercase__ , '''pytorch_model.bin''' ) )
__lowerCAmelCase : Tuple = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
__lowerCAmelCase : Tuple = tensor
print(f"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
__lowerCAmelCase : Union[str, Any] = tensor
print(f"""Copied layer {name}""" )
elif "bias" in name:
__lowerCAmelCase : Any = tensor
print(f"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
__lowerCAmelCase : int = MagnitudeBinarizer.apply(inputs=lowercase__ , threshold=lowercase__ )
__lowerCAmelCase : Optional[Any] = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
__lowerCAmelCase : Union[str, Any] = name[:-6]
__lowerCAmelCase : Tuple = model[f"""{prefix_}mask_scores"""]
__lowerCAmelCase : Any = TopKBinarizer.apply(lowercase__ , lowercase__ )
__lowerCAmelCase : Dict = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
__lowerCAmelCase : Tuple = name[:-6]
__lowerCAmelCase : Optional[Any] = model[f"""{prefix_}mask_scores"""]
__lowerCAmelCase : int = ThresholdBinarizer.apply(lowercase__ , lowercase__ , lowercase__ )
__lowerCAmelCase : Dict = tensor * mask
print(f"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
__lowerCAmelCase : Dict = name[:-6]
__lowerCAmelCase : str = model[f"""{prefix_}mask_scores"""]
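# Hard-concrete gate from L0 regularization (Louizos et al., 2018): stretch sigmoid(scores) from (0, 1) to (l, r) = (-0.1, 1.1), then clamp to [0, 1] so exact zeros and ones are reachable.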
__lowerCAmelCase , __lowerCAmelCase = -0.1, 1.1
__lowerCAmelCase : Tuple = torch.sigmoid(lowercase__ )
__lowerCAmelCase : Optional[Any] = s * (r - l) + l
__lowerCAmelCase : Any = s_bar.clamp(min=0.0 , max=1.0 )
__lowerCAmelCase : Optional[int] = tensor * mask
print(f"""Pruned layer {name}""" )
else:
raise ValueError('''Unknown pruning method''' )
if target_model_path is None:
__lowerCAmelCase : Any = os.path.join(
os.path.dirname(lowercase__ ) , f"""bertarized_{os.path.basename(lowercase__ )}""" )
if not os.path.isdir(lowercase__ ):
shutil.copytree(lowercase__ , lowercase__ )
print(f"""\nCreated folder {target_model_path}""" )
torch.save(lowercase__ , os.path.join(lowercase__ , '''pytorch_model.bin''' ) )
print('''\nPruned model saved! See you later!''' )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
_UpperCamelCase = parser.parse_args()
main(args)
| 701
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __lowercase (_UpperCAmelCase ):
_UpperCamelCase = """time_series_transformer"""
_UpperCamelCase = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
"""num_hidden_layers""": """encoder_layers""",
}
def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = [1, 2, 3, 4, 5, 6, 7] , A_ = "mean" , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 32 , A_ = 32 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = True , A_ = "gelu" , A_ = 64 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.02 , A_=True , **A_ , ) ->Any:
'''simple docstring'''
__lowerCAmelCase : Tuple = prediction_length
__lowerCAmelCase : Tuple = context_length or prediction_length
__lowerCAmelCase : str = distribution_output
__lowerCAmelCase : Any = loss
__lowerCAmelCase : List[str] = input_size
__lowerCAmelCase : Any = num_time_features
__lowerCAmelCase : Optional[int] = lags_sequence
__lowerCAmelCase : Any = scaling
__lowerCAmelCase : Dict = num_dynamic_real_features
__lowerCAmelCase : Any = num_static_real_features
__lowerCAmelCase : Optional[Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__lowerCAmelCase : Any = cardinality
else:
__lowerCAmelCase : Optional[Any] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(A_ ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__lowerCAmelCase : List[Any] = embedding_dimension
else:
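# Default embedding-size heuristic: min(50, (cardinality + 1) // 2) for each categorical feature.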
__lowerCAmelCase : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__lowerCAmelCase : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
__lowerCAmelCase : List[Any] = input_size * len(A_ ) + self._number_of_features
__lowerCAmelCase : int = d_model
__lowerCAmelCase : List[Any] = encoder_attention_heads
__lowerCAmelCase : int = decoder_attention_heads
__lowerCAmelCase : Tuple = encoder_ffn_dim
__lowerCAmelCase : int = decoder_ffn_dim
__lowerCAmelCase : List[Any] = encoder_layers
__lowerCAmelCase : List[Any] = decoder_layers
__lowerCAmelCase : Dict = dropout
__lowerCAmelCase : int = attention_dropout
__lowerCAmelCase : Optional[int] = activation_dropout
__lowerCAmelCase : Optional[Any] = encoder_layerdrop
__lowerCAmelCase : str = decoder_layerdrop
__lowerCAmelCase : Optional[Any] = activation_function
__lowerCAmelCase : Optional[Any] = init_std
__lowerCAmelCase : int = use_cache
super().__init__(is_encoder_decoder=A_ , **A_ )
@property
def UpperCamelCase__ ( self ) ->int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 583
| 0
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
A = logging.get_logger(__name__)
A = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
A = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
A = {
"""allenai/led-base-16384""": 16_384,
}
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = LEDTokenizer
lowercase_ = ["input_ids", "attention_mask"]
def __init__( self : Union[str, Any] , UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : str="replace" , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : Dict="</s>" , UpperCamelCase_ : Tuple="</s>" , UpperCamelCase_ : List[Any]="<s>" , UpperCamelCase_ : Optional[int]="<unk>" , UpperCamelCase_ : Any="<pad>" , UpperCamelCase_ : List[Any]="<mask>" , UpperCamelCase_ : Optional[Any]=False , UpperCamelCase_ : List[Any]=True , **UpperCamelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(
UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , errors=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , trim_offsets=UpperCamelCase_ , **UpperCamelCase_ , )
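# Rebuild the byte-level pre-tokenizer if the serialized add_prefix_space flag differs from the one requested here.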
__UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
if pre_tok_state.get("add_prefix_space" , UpperCamelCase_) != add_prefix_space:
__UpperCAmelCase : str = getattr(UpperCamelCase_ , pre_tok_state.pop("type"))
__UpperCAmelCase : Any = add_prefix_space
__UpperCAmelCase : Union[str, Any] = pre_tok_class(**UpperCamelCase_)
__UpperCAmelCase : List[str] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__UpperCAmelCase : Optional[int] = "post_processor"
__UpperCAmelCase : Optional[int] = getattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_)
if tokenizer_component_instance:
__UpperCAmelCase : List[str] = json.loads(tokenizer_component_instance.__getstate__())
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__UpperCAmelCase : Dict = tuple(state["sep"])
if "cls" in state:
__UpperCAmelCase : Dict = tuple(state["cls"])
__UpperCAmelCase : Union[str, Any] = False
if state.get("add_prefix_space" , UpperCamelCase_) != add_prefix_space:
__UpperCAmelCase : Optional[Any] = add_prefix_space
__UpperCAmelCase : List[str] = True
if state.get("trim_offsets" , UpperCamelCase_) != trim_offsets:
__UpperCAmelCase : Any = trim_offsets
__UpperCAmelCase : Optional[Any] = True
if changes_to_apply:
__UpperCAmelCase : Any = getattr(UpperCamelCase_ , state.pop("type"))
__UpperCAmelCase : str = component_class(**UpperCamelCase_)
setattr(self.backend_tokenizer , UpperCamelCase_ , UpperCamelCase_)
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def a_ ( self : List[Any]):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet.")
return None
return str(self._mask_token)
@mask_token.setter
def a_ ( self : List[str] , UpperCamelCase_ : List[str]):
"""simple docstring"""
__UpperCAmelCase : List[str] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_) if isinstance(UpperCamelCase_ , UpperCamelCase_) else value
__UpperCAmelCase : Any = value
def a_ ( self : Tuple , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : Optional[int]):
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = kwargs.get("is_split_into_words" , UpperCamelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs.")
return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_)
def a_ ( self : Optional[Any] , *UpperCamelCase_ : Tuple , **UpperCamelCase_ : List[Any]):
"""simple docstring"""
__UpperCAmelCase : List[Any] = kwargs.get("is_split_into_words" , UpperCamelCase_)
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
"to use it with pretokenized inputs.")
return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_)
def a_ ( self : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
__UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_)
return tuple(UpperCamelCase_)
def a_ ( self : Union[str, Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Optional[Any]=None):
"""simple docstring"""
__UpperCAmelCase : List[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def a_ ( self : str , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : List[str] = [self.sep_token_id]
__UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def a_ ( self : Optional[int] , UpperCamelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase_ : Optional[int] = None , UpperCamelCase_ : Optional[bool] = None , ):
"""simple docstring"""
__UpperCAmelCase : Dict = super()._pad(
encoded_inputs=UpperCamelCase_ , max_length=UpperCamelCase_ , padding_strategy=UpperCamelCase_ , pad_to_multiple_of=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
# Load from model defaults
if return_attention_mask is None:
__UpperCAmelCase : int = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__UpperCAmelCase : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__UpperCAmelCase : str = len(encoded_inputs["global_attention_mask"]) != len(UpperCamelCase_)
if needs_to_be_padded:
__UpperCAmelCase : List[Any] = len(UpperCamelCase_) - len(encoded_inputs["global_attention_mask"])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__UpperCAmelCase : Any = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__UpperCAmelCase : Union[str, Any] = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side))
return encoded_inputs
| 77
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( lowerCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCAmelCase = DanceDiffusionPipeline
_lowerCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
_lowerCAmelCase = PipelineTesterMixin.required_optional_params - {
"""callback""",
"""latents""",
"""callback_steps""",
"""output_type""",
"""num_images_per_prompt""",
}
_lowerCAmelCase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
_lowerCAmelCase = False
_lowerCAmelCase = False
def a ( self ):
torch.manual_seed(0 )
_UpperCamelCase = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=5_12 , sample_rate=1_60_00 , in_channels=2 , out_channels=2 , flip_sin_to_cos=A_ , use_timestep_embedding=A_ , time_embedding_type="fourier" , mid_block_type="UNetMidBlock1D" , down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , )
_UpperCamelCase = IPNDMScheduler()
_UpperCamelCase = {
"unet": unet,
"scheduler": scheduler,
}
return components
def a ( self , A_ , A_=0 ):
if str(A_ ).startswith("mps" ):
_UpperCamelCase = torch.manual_seed(A_ )
else:
_UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
_UpperCamelCase = {
"batch_size": 1,
"generator": generator,
"num_inference_steps": 4,
}
return inputs
def a ( self ):
_UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCamelCase = self.get_dummy_components()
_UpperCamelCase = DanceDiffusionPipeline(**A_ )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = self.get_dummy_inputs(A_ )
_UpperCamelCase = pipe(**A_ )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
_UpperCamelCase = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def a ( self ):
return super().test_save_load_local()
@skip_mps
def a ( self ):
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
@skip_mps
def a ( self ):
return super().test_save_load_optional_components()
@skip_mps
def a ( self ):
return super().test_attention_slicing_forward_pass()
def a ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def a ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
_UpperCamelCase = torch_device
_UpperCamelCase = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(generator=A_ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCamelCase = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
def a ( self ):
_UpperCamelCase = torch_device
_UpperCamelCase = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k" , torch_dtype=torch.floataa )
_UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
_UpperCamelCase = torch.manual_seed(0 )
_UpperCamelCase = pipe(generator=A_ , num_inference_steps=1_00 , audio_length_in_s=4.096 )
_UpperCamelCase = output.audios
_UpperCamelCase = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
_UpperCamelCase = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1e-2
| 138
| 0
|
'''simple docstring'''
def A_( A : float , A : float):
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f"""{price_plus_tax(1_00, 0.25) = }""")
print(f"""{price_plus_tax(125.50, 0.05) = }""")
| 719
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Optional[Any] = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Dict = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Any = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 432
| 0
|
from manim import *
class _SCREAMING_SNAKE_CASE ( a_ ):
'''simple docstring'''
def _snake_case ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE = Rectangle(height=0.5 , width=0.5 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
SCREAMING_SNAKE_CASE = Rectangle(height=0.25 , width=0.25 )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text("CPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(4 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text("GPU" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text("Model" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for i, rect in enumerate(lowerCAmelCase__ ):
SCREAMING_SNAKE_CASE = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.8 )
target.move_to(lowerCAmelCase__ )
model_arr.append(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = [meta_mem.copy() for i in range(6 )]
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
SCREAMING_SNAKE_CASE = Text("Disk" , font_size=24 )
SCREAMING_SNAKE_CASE = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
disk.move_to([-4, -1.25, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
SCREAMING_SNAKE_CASE = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = MarkupText(
f"<span fgcolor=\'{BLUE}\'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = MarkupText(
f"Now watch as an input is passed through the model\nand how the memory is utilized and handled." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = Square(0.3 )
input.set_fill(lowerCAmelCase__ , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , lowerCAmelCase__ , buff=0.5 )
self.play(Write(lowerCAmelCase__ ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=lowerCAmelCase__ , buff=0.02 )
self.play(MoveToTarget(lowerCAmelCase__ ) )
self.play(FadeOut(lowerCAmelCase__ ) )
SCREAMING_SNAKE_CASE = Arrow(start=lowerCAmelCase__ , end=lowerCAmelCase__ , color=lowerCAmelCase__ , buff=0.5 )
a.next_to(model_arr[0].get_left() , lowerCAmelCase__ , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
SCREAMING_SNAKE_CASE = MarkupText(
f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back." , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) )
SCREAMING_SNAKE_CASE = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
self.play(
Write(lowerCAmelCase__ ) , Circumscribe(model_arr[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(model_cpu_arr[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
SCREAMING_SNAKE_CASE = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.02 , lowerCAmelCase__ , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.02 )
SCREAMING_SNAKE_CASE = AnimationGroup(
FadeOut(lowerCAmelCase__ , run_time=0.5 ) , MoveToTarget(lowerCAmelCase__ , run_time=0.5 ) , FadeIn(lowerCAmelCase__ , run_time=0.5 ) , lag_ratio=0.2 )
self.play(lowerCAmelCase__ )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
SCREAMING_SNAKE_CASE = 0.7
self.play(
Circumscribe(model_arr[i] , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[i] , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[i + 1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(model_arr[i + 1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.02 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(cpu_left_col_base[-1] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , Circumscribe(gpu_rect[0] , color=lowerCAmelCase__ , **lowerCAmelCase__ ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
SCREAMING_SNAKE_CASE = a_c
SCREAMING_SNAKE_CASE = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.02 , buff=0.5 )
self.play(
FadeOut(lowerCAmelCase__ ) , FadeOut(lowerCAmelCase__ , run_time=0.5 ) , )
SCREAMING_SNAKE_CASE = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) , MoveToTarget(lowerCAmelCase__ ) )
self.wait()
| 16
|
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
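# A minimal sketch of the pattern this script builds on (illustrative only,
# not part of the training script itself): `find_executable_batch_size`
# re-runs its wrapped function with a halved batch size each time it raises
# a CUDA out-of-memory error, until a batch size fits.
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # build dataloaders with `batch_size` and run the loop
#
#     train()  # call with no arguments; the decorator supplies batch_size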
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Creates a set of `DataLoader`s for the glue "mrpc" dataset,
    using "bert-base-cased" as the tokenizer.
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        " between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10"
        " and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 653
| 0
|
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
UpperCamelCase = {"allegro/herbert-base-cased": 514}
UpperCamelCase = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 677
|
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
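# Quick sanity check (illustrative): both implementations should agree on
# small vectors, e.g.
#     euclidean_distance([0, 0], [3, 4]) == euclidean_distance_no_np([0, 0], [3, 4]) == 5.0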
if __name__ == "__main__":
    def benchmark() -> None:
from timeit import timeit
print('Without Numpy' )
print(
timeit(
'euclidean_distance_no_np([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
print('With Numpy' )
print(
timeit(
'euclidean_distance([1, 2, 3], [4, 5, 6])' , number=10_000 , globals=globals() , ) )
benchmark()
| 677
| 1
|
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class lowercase ( __SCREAMING_SNAKE_CASE ):
_a = DistilBertTokenizer
_a = DistilBertTokenizerFast
_a = True
@slow
def a__ ( self ) -> List[str]:
        tokenizer = DistilBertTokenizer.from_pretrained("""distilbert-base-uncased""")
        text = tokenizer.encode("""sequence builders""", add_special_tokens=False)
        text_a = tokenizer.encode("""multi-sequence build""", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 307
|
"""simple docstring"""
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
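# Worked example (illustrative): simple_interest(1000, 0.0005, 30) charges
# 1000 * 0.0005 * 30 = 15.0, while compound_interest(1000, 0.0005, 30)
# yields 1000 * ((1.0005 ** 30) - 1) ≈ 15.11, slightly more because each
# period's interest itself earns interest.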
if __name__ == "__main__":
import doctest
doctest.testmod()
| 695
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 701
|
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    # Normalize: lowercase, strip a trailing plural "s", then map full unit
    # names (e.g. "kilometer") to their symbols (e.g. "km").
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")
    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10, exponent)
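# Examples (illustrative): length_conversion(4, "kilometer", "meter") is
# 4 * 10 ** (3 - 0) = 4000.0, and length_conversion(1, "meter", "km") is
# 1 * 10 ** -(3 - 0) = 0.001.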
if __name__ == "__main__":
from doctest import testmod
testmod()
| 188
| 0
|
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
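# Worked check (illustrative): arc_length(90, 10) = 2 * pi * 10 * (90 / 360)
# = 5 * pi ≈ 15.71 — a quarter of the full circumference 2 * pi * 10.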
if __name__ == "__main__":
print(arc_length(90, 10))
| 31
|
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPT2Config
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 128,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.0_1),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
@classmethod
def __A ( cls : str ) -> List[Any]:
__lowerCamelCase = TOKEN
HfFolder.save_token(SCREAMING_SNAKE_CASE__ )
@classmethod
def __A ( cls : Any ) -> Union[str, Any]:
try:
delete_repo(token=cls._token , repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def __A ( self : int ) -> List[str]:
__lowerCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''test-config''' , use_auth_token=self._token )
__lowerCamelCase = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(SCREAMING_SNAKE_CASE__ , repo_id='''test-config''' , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token )
__lowerCamelCase = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __A ( self : Optional[Any] ) -> Any:
__lowerCamelCase = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token )
__lowerCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
SCREAMING_SNAKE_CASE__ , repo_id='''valid_org/test-config-org''' , push_to_hub=SCREAMING_SNAKE_CASE__ , use_auth_token=self._token )
__lowerCamelCase = BertConfig.from_pretrained('''valid_org/test-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(SCREAMING_SNAKE_CASE__ , getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) )
def __A ( self : Optional[Any] ) -> Dict:
CustomConfig.register_for_auto_class()
__lowerCamelCase = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
__lowerCamelCase = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=SCREAMING_SNAKE_CASE__ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' )
self.assertEqual(new_config.attribute , 42 )
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Optional[int] ) -> Tuple:
        c = GPT2Config()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + "foo"  # str
        c.update_from_string(
            f"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}"
        )
        self.assertEqual(n_embd, c.n_embd, "mismatch for key: n_embd")
        self.assertEqual(resid_pdrop, c.resid_pdrop, "mismatch for key: resid_pdrop")
        self.assertEqual(scale_attn_weights, c.scale_attn_weights, "mismatch for key: scale_attn_weights")
        self.assertEqual(summary_type, c.summary_type, "mismatch for key: summary_type")
def __A ( self : str ) -> Dict:
__lowerCamelCase = PretrainedConfig()
__lowerCamelCase = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in `config_common_kwargs` above.
self.assertListEqual(
SCREAMING_SNAKE_CASE__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
__lowerCamelCase = [key for key, value in config_common_kwargs.items() if value == getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )]
if len(SCREAMING_SNAKE_CASE__ ) > 0:
raise ValueError(
'''The following keys are set with the default values in'''
''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
f''' {', '.join(SCREAMING_SNAKE_CASE__ )}.''' )
def __A ( self : Dict ) -> List[str]:
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
# config is in subfolder, the following should not work without specifying the subfolder
__lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
__lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE__ )
def __A ( self : Any ) -> Optional[Any]:
# A mock response for an HTTP head request to emulate server down
__lowerCamelCase = mock.Mock()
__lowerCamelCase = 5_00
__lowerCamelCase = {}
__lowerCamelCase = HTTPError
__lowerCamelCase = {}
# Download this model to make sure it's in the cache.
__lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('''requests.Session.request''' , return_value=SCREAMING_SNAKE_CASE__ ) as mock_head:
__lowerCamelCase = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
# This check we did call the fake head request
mock_head.assert_called()
def __A ( self : List[str] ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__lowerCamelCase = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def __A ( self : List[Any] ) -> List[Any]:
__lowerCamelCase = AutoConfig.from_pretrained('''bert-base-cased''' )
__lowerCamelCase = ['''config.4.0.0.json''']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = 2
json.dump(configuration.to_dict() , open(os.path.join(SCREAMING_SNAKE_CASE__ , '''config.4.0.0.json''' ) , '''w''' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
__lowerCamelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_configuration.hidden_size , 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
__lowerCamelCase = ['''config.42.0.0.json''']
__lowerCamelCase = 7_68
configuration.save_pretrained(SCREAMING_SNAKE_CASE__ )
shutil.move(os.path.join(SCREAMING_SNAKE_CASE__ , '''config.4.0.0.json''' ) , os.path.join(SCREAMING_SNAKE_CASE__ , '''config.42.0.0.json''' ) )
__lowerCamelCase = AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_configuration.hidden_size , 7_68 )
def __A ( self : Optional[Any] ) -> Tuple:
# This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
__lowerCamelCase = '''hf-internal-testing/test-two-configs'''
import transformers as new_transformers
__lowerCamelCase = '''v4.0.0'''
__lowerCamelCase , __lowerCamelCase = new_transformers.models.auto.AutoConfig.from_pretrained(
SCREAMING_SNAKE_CASE__ , return_unused_kwargs=SCREAMING_SNAKE_CASE__ )
self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
self.assertDictEqual(SCREAMING_SNAKE_CASE__ , {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
__lowerCamelCase = '''v3.0.0'''
__lowerCamelCase = old_transformers.models.auto.AutoConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
self.assertEqual(old_configuration.hidden_size , 7_68 )
| 298
| 0
|
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Convert each pixel of the image to its color negative."""
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
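# Example (illustrative): a black pixel [0, 0, 0] becomes white
# [255, 255, 255], and mid-gray [128, 128, 128] becomes [127, 127, 127].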
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    img = convert_to_negative(img)
# show result image
imshow("negative of original image", img)
waitKey(0)
destroyAllWindows()
| 711
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class _UpperCamelCase :
"""simple docstring"""
@property
def _UpperCAmelCase ( self ) -> Any:
return self.get_dummy_input()
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.' )
def _UpperCAmelCase ( self , a__=True , a__=False , a__=False , a__=False , ) -> Optional[Any]:
A = 4
A = 32
A = (32, 32)
A = torch.manual_seed(0 )
A = torch.device(a__ )
A = (batch_size, num_channels) + sizes
A = randn_tensor(a__ , generator=a__ , device=a__ )
A = {"""hidden_states""": hidden_states}
if include_temb:
A = 128
A = randn_tensor((batch_size, temb_channels) , generator=a__ , device=a__ )
if include_res_hidden_states_tuple:
A = torch.manual_seed(1 )
A = (randn_tensor(a__ , generator=a__ , device=a__ ),)
if include_encoder_hidden_states:
A = floats_tensor((batch_size, 32, 32) ).to(a__ )
if include_skip_sample:
A = randn_tensor(((batch_size, 3) + sizes) , generator=a__ , device=a__ )
return dummy_input
def _UpperCAmelCase ( self ) -> int:
A = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
A = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
A = self.dummy_input
return init_dict, inputs_dict
def _UpperCAmelCase ( self , a__ ) -> Optional[int]:
A , A = self.prepare_init_args_and_inputs_for_common()
A = self.block_class(**a__ )
unet_block.to(a__ )
unet_block.eval()
with torch.no_grad():
A = unet_block(**a__ )
if isinstance(a__ , a__ ):
A = output[0]
self.assertEqual(output.shape , self.output_shape )
A = output[0, -1, -3:, -3:]
A = torch.tensor(a__ ).to(a__ )
assert torch_all_close(output_slice.flatten() , a__ , atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" , """Training is not supported in mps""" )
def _UpperCAmelCase ( self ) -> str:
A , A = self.prepare_init_args_and_inputs_for_common()
A = self.block_class(**a__ )
model.to(a__ )
model.train()
A = model(**a__ )
if isinstance(a__ , a__ ):
A = output[0]
A = torch.device(a__ )
A = randn_tensor(output.shape , device=a__ )
A = torch.nn.functional.mse_loss(a__ , a__ )
loss.backward()
| 546
| 0
|
'''simple docstring'''
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
A_ = "facebook/wmt19-en-de"
A_ = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
A_ = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
A_ = FSMTForConditionalGeneration(config)
print(f'''num of params {tiny_model.num_parameters()}''')
# Test
A_ = tokenizer(["Making tiny model"], return_tensors="pt")
A_ = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
A_ = "tiny-wmt19-en-de"
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 42
|
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self,**__lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=__lowerCamelCase,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('''reshaped_input_sizes''' )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
@require_torch
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = [torch.ones((1, 3, 5, 5) )]
A__ = [[1764, 2646]]
A__ = [[683, 1024]]
A__ = processor.post_process_masks(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
A__ = processor.post_process_masks(
__lowerCamelCase,torch.tensor(__lowerCamelCase ),torch.tensor(__lowerCamelCase ) )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ) )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(__lowerCamelCase ):
A__ = processor.post_process_masks(__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ) )
@require_vision
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self,**__lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase ( self ):
A__ = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_image_processor(do_normalize=__lowerCamelCase,padding_value=1.0 )
A__ = SamProcessor.from_pretrained(self.tmpdirname,do_normalize=__lowerCamelCase,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string(),image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor,__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCamelCase,return_tensors='''np''' )
A__ = processor(images=__lowerCamelCase,return_tensors='''np''' )
input_feat_extract.pop('''original_sizes''' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('''reshaped_input_sizes''' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(),input_processor[key].sum(),delta=1E-2 )
@require_tf
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = [tf.ones((1, 3, 5, 5) )]
A__ = [[1764, 2646]]
A__ = [[683, 1024]]
A__ = processor.post_process_masks(__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''tf''' )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
A__ = processor.post_process_masks(
__lowerCamelCase,tf.convert_to_tensor(__lowerCamelCase ),tf.convert_to_tensor(__lowerCamelCase ),return_tensors='''tf''',)
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
# should also work with np
A__ = [np.ones((1, 3, 5, 5) )]
A__ = processor.post_process_masks(
__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ),return_tensors='''tf''' )
self.assertEqual(masks[0].shape,(1, 3, 1764, 2646) )
A__ = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
A__ = processor.post_process_masks(
__lowerCamelCase,np.array(__lowerCamelCase ),np.array(__lowerCamelCase ),return_tensors='''tf''' )
@require_vision
@require_torchvision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def UpperCamelCase ( self ):
A__ = tempfile.mkdtemp()
A__ = SamImageProcessor()
A__ = SamProcessor(__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self,**__lowerCamelCase ):
return AutoProcessor.from_pretrained(self.tmpdirname,**__lowerCamelCase ).image_processor
def UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self ):
A__ = [np.random.randint(255,size=(3, 30, 400),dtype=np.uinta )]
A__ = [Image.fromarray(np.moveaxis(__lowerCamelCase,0,-1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = np.random.randint(0,2,size=(1, 3, 5, 5) ).astype(np.floataa )
A__ = [tf.convert_to_tensor(__lowerCamelCase )]
A__ = [torch.tensor(__lowerCamelCase )]
A__ = [[1764, 2646]]
A__ = [[683, 1024]]
A__ = processor.post_process_masks(
__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''tf''' )
A__ = processor.post_process_masks(
__lowerCamelCase,__lowerCamelCase,__lowerCamelCase,return_tensors='''pt''' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCamelCase ( self ):
A__ = self.get_image_processor()
A__ = SamProcessor(image_processor=__lowerCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(__lowerCamelCase,return_tensors='''pt''' )['''pixel_values'''].numpy()
A__ = processor(images=__lowerCamelCase,return_tensors='''pt''' )['''pixel_values'''].numpy()
A__ = image_processor(__lowerCamelCase,return_tensors='''tf''' )['''pixel_values'''].numpy()
A__ = processor(images=__lowerCamelCase,return_tensors='''tf''' )['''pixel_values'''].numpy()
self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
self.assertTrue(np.allclose(__lowerCamelCase,__lowerCamelCase ) )
| 190
| 0
|
"""simple docstring"""
import re
def dna_complement(dna: str) -> str:
    """Return the complementary strand of a DNA sequence."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
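# Example (illustrative): dna_complement("ATCG") == "TAGC" — each base maps
# to its Watson-Crick partner (A<->T, C<->G), so dna_complement("GTA") == "CAT".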
if __name__ == "__main__":
import doctest
doctest.testmod()
| 632
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields_ is a specific attribute expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
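# Example usage (illustrative; `render_menu` is a hypothetical callable):
#
#     with hide():
#         render_menu()  # cursor is restored even if this raises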
| 632
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> str:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> int:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Optional[int]:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> Optional[int]:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> int:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> str:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> List[str]:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Tuple:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> List[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> str:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> str:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Union[str, Any]:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Dict:
requires_backends(cls , ["""flax"""] )
class lowerCAmelCase_ ( metaclass=snake_case_ ):
'''simple docstring'''
_snake_case = ['''flax''']
def __init__( self , *snake_case_ , **snake_case_ ) -> str:
requires_backends(self , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["""flax"""] )
@classmethod
def A__ ( cls , *snake_case_ , **snake_case_ ) -> Any:
requires_backends(cls , ["""flax"""] )
| 465
|
'''simple docstring'''
def climb_stairs(number_of_steps: int) -> int:
    """Distinct ways to climb `number_of_steps` stairs taking 1 or 2 steps at a time."""
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
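# Worked values (illustrative): climb_stairs(1) = 1, climb_stairs(2) = 2,
# climb_stairs(3) = 3, climb_stairs(4) = 5 — the Fibonacci recurrence
# f(n) = f(n - 1) + f(n - 2), since the last move is either 1 or 2 steps.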
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 0
|
import math
class SelfOrganizingMap:
    """A minimal two-cluster self-organizing map (Kohonen network)."""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        """Return the index of the weight vector closest to the sample."""
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow(sample[i] - weights[0][i], 2)
            d1 += math.pow(sample[i] - weights[1][i], 2)
        # the winner is the cluster with the smaller squared distance
        return 0 if d0 < d1 else 1

    def update(
        self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float
    ) -> list[list[int | float]]:
        """Move the winning weight vector `j` toward the sample by learning rate alpha."""
        for i in range(len(weights[j])):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
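# The update above is the standard Kohonen learning rule (summary, assuming a
# single winning unit): w_j <- w_j + alpha * (x - w_j), i.e. the winning
# weight vector is pulled toward the sample by a fraction alpha.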
def main() -> None:
    # training samples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]

    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]

    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5

    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]

            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)

            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)

    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)

    # results
    print(f"Clusters that the test sample belongs to : {winner}")
    print(f"Weights that have been trained : {weights}")
# running the main() function
if __name__ == "__main__":
main()
| 247
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes up to and including num."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
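# Example (illustrative): prime_sieve_eratosthenes(10) -> [2, 3, 5, 7].
# Marking starts at p * p because every smaller multiple of p has a prime
# factor below p and was already crossed out.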
if __name__ == "__main__":
import doctest
doctest.testmod()
A : Any = int(input('''Enter a positive integer: ''').strip())
print(prime_sieve_eratosthenes(user_num))
| 247
| 1
|
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects that also tracks each node's position in
    `idx_of_element`, so `decrease_key` can sift a node up in O(log n).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                # swap the nodes and keep idx_of_element in sync
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            # swap child and parent and keep idx_of_element in sync
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            (
                self.idx_of_element[self.heap[p]],
                self.idx_of_element[self.heap[idx]],
            ) = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        (
            self.idx_of_element[self.heap[0]],
            self.idx_of_element[self.heap[-1]],
        ) = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
r = Node('R', -1)
b = Node('B', 6)
a = Node('A', 3)
x = Node('X', 1)
e = Node('E', 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
    print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
    print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
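    # A short sketch, assuming the MinHeap above: repeatedly calling remove()
    # yields nodes in non-decreasing val order, which is the defining heap
    # property. The helper names here are illustrative, not part of the class.
    demo_heap = MinHeap([Node('P', 5), Node('Q', 2), Node('S', 9)])
    drained = []
    while not demo_heap.is_empty():
        drained.append(demo_heap.remove().val)
    assert drained == sorted(drained)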
| 97
|
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch"
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 319
| 0
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg'
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt',
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type='numpy',
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt',
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type='numpy',
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg'
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type='numpy',
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy'
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 345
|
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """
    Unconditional image generation with latent diffusion: a UNet denoises in
    latent space, then a VQ-VAE decodes the latents to an image.
    """

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        # sample initial gaussian latents
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
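# A usage sketch, not part of the pipeline itself; the checkpoint name is an
# assumption ("CompVis/ldm-celebahq-256" is the usual unconditional LDM
# example, but any checkpoint with this vqvae/unet/scheduler layout works):
#
#   pipeline = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipeline(num_inference_steps=50).images[0]
#   image.save("ldm_generated_image.png")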
| 345
| 1
|
'''simple docstring'''
def remove_digit(num: int) -> int:
    """
    Return the biggest number obtainable by deleting exactly one digit of num.
    """
    if not isinstance(num, int):
        raise TypeError('''only integers accepted as input''')
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for char in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int(''''''.join(list(transposition))) for transposition in num_transpositions
        )
if __name__ == "__main__":
__import__('doctest').testmod()
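    # Worked example, assuming remove_digit above: for 152 the one-digit
    # deletions are 52, 12 and 15, so the maximum is 52.
    assert remove_digit(152) == 52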
| 22
|
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class a__ :
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
return self.get_dummy_input()
@property
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"""'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.""" )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[Any]=True , a : Dict=False , a : Optional[Any]=False , a : List[Any]=False , ):
"""simple docstring"""
__lowerCamelCase = 4
__lowerCamelCase = 32
__lowerCamelCase = (32, 32)
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = torch.device(a )
__lowerCamelCase = (batch_size, num_channels) + sizes
__lowerCamelCase = randn_tensor(a , generator=a , device=a )
__lowerCamelCase = {'''hidden_states''': hidden_states}
if include_temb:
__lowerCamelCase = 1_28
__lowerCamelCase = randn_tensor((batch_size, temb_channels) , generator=a , device=a )
if include_res_hidden_states_tuple:
__lowerCamelCase = torch.manual_seed(1 )
__lowerCamelCase = (randn_tensor(a , generator=a , device=a ),)
if include_encoder_hidden_states:
__lowerCamelCase = floats_tensor((batch_size, 32, 32) ).to(a )
if include_skip_sample:
__lowerCamelCase = randn_tensor(((batch_size, 3) + sizes) , generator=a , device=a )
return dummy_input
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = {
'''in_channels''': 32,
'''out_channels''': 32,
'''temb_channels''': 1_28,
}
if self.block_type == "up":
__lowerCamelCase = 32
if self.block_type == "mid":
init_dict.pop('''out_channels''' )
__lowerCamelCase = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[str] ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase = self.block_class(**a )
unet_block.to(a )
unet_block.eval()
with torch.no_grad():
__lowerCamelCase = unet_block(**a )
if isinstance(a , a ):
__lowerCamelCase = output[0]
self.assertEqual(output.shape , self.output_shape )
__lowerCamelCase = output[0, -1, -3:, -3:]
__lowerCamelCase = torch.tensor(a ).to(a )
assert torch_all_close(output_slice.flatten() , a , atol=5e-3 )
@unittest.skipIf(torch_device == '''mps''' , '''Training is not supported in mps''' )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.prepare_init_args_and_inputs_for_common()
__lowerCamelCase = self.block_class(**a )
model.to(a )
model.train()
__lowerCamelCase = model(**a )
if isinstance(a , a ):
__lowerCamelCase = output[0]
__lowerCamelCase = torch.device(a )
__lowerCamelCase = randn_tensor(output.shape , device=a )
__lowerCamelCase = torch.nn.functional.mse_loss(a , a )
loss.backward()
| 546
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_resnet": ["RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ResNetConfig", "ResNetOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_resnet"] = [
"RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"ResNetForImageClassification",
"ResNetModel",
"ResNetPreTrainedModel",
"ResNetBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_resnet"] = [
"TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFResNetForImageClassification",
"TFResNetModel",
"TFResNetPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_resnet"] = [
"FlaxResNetForImageClassification",
"FlaxResNetModel",
"FlaxResNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 451
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """
    Monte Carlo estimate of pi: draw points uniformly in the square
    [-1, 1] x [-1, 1] and count the share that lands inside the unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The numpy value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate on [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator on y = x, whose exact integral is (max^2 - min^2) / 2."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print('''******************''')
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print('''******************''')


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimate pi as the area under the quarter circle y = sqrt(4 - x^2) on [0, 2]."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print('''******************''')
    print('''Estimating pi using area_under_curve_estimator''')
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print('''******************''')
if __name__ == "__main__":
import doctest
doctest.testmod()
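    # Illustrative run, assuming the estimators above; with 1e5 samples the
    # pi estimates are typically within a few hundredths of the true value.
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)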
| 451
| 1
|
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape with values within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs['input_ids'].shape[-1] // 2
        input_ids = inputs['input_ids'][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())

    def test_greedy_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, _, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences

            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
        model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')

        encoder_input_str = 'Hello world'
        input_ids = tokenizer(encoder_input_str, return_tensors='np').input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, 'do_samples'):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, 'foo'):
            fake_model_kwargs = {'foo': 'bar'}
            model.generate(input_ids, **fake_model_kwargs)
| 89
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            '''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
            ''' Distillation'''
        )
    )
    parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
    parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
    parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
    parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = '''roberta'''
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = '''transformer'''

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"""{prefix}.{param_name}"""] = state_dict[f"""{prefix}.{param_name}"""]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"""{prefix}.embeddings.{w}.weight"""
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"""{prefix}.embeddings.LayerNorm.{w}"""
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"""{prefix}.h.{std_idx}.{layer}.{w}"""] = state_dict[
                        f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
                    ]
            compressed_sd[f"""{prefix}.h.{std_idx}.attn.bias"""] = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"""{prefix}.encoder.layer.{std_idx}.{layer}.{w}"""] = state_dict[
                        f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"""{layer}"""] = state_dict[f"""{layer}"""]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"""lm_head.dense.{w}"""] = state_dict[f"""lm_head.dense.{w}"""]
                compressed_sd[f"""lm_head.layer_norm.{w}"""] = state_dict[f"""lm_head.layer_norm.{w}"""]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"""{prefix}.ln_f.{w}"""] = state_dict[f"""{prefix}.ln_f.{w}"""]
        compressed_sd['''lm_head.weight'''] = state_dict['''lm_head.weight''']

    print(f"""N layers selected for distillation: {std_idx}""")
    print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")

    print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
    torch.save(compressed_sd, args.dump_checkpoint)
| 252
| 0
|
'''simple docstring'''
def solution(limit: int = 100_0000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit, using a sieve."""
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
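    # Worked small case, assuming solution above: for limit = 8 the totients
    # phi(2..8) are 1, 2, 2, 4, 2, 6, 4, so the sum is 21.
    assert solution(8) == 21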
| 705
|
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")


@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('''google/pegasus-large''')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = '''</s>'''
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<pad>''')
        self.assertEqual(vocab_keys[1], '''</s>''')
        self.assertEqual(vocab_keys[-1], '''v''')
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
            ''' </s> <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = '''To ensure a smooth flow of bank resolutions.'''
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['''This is going to be way too long.''' * 150, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='''pt''')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='''pt''')

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase__ : List[Any] = {'''input_ids''': [[38979, 143, 18485, 606, 130, 26669, 87686, 121, 54189, 1129, 111, 26669, 87686, 121, 9114, 14787, 121, 13249, 158, 592, 956, 121, 14621, 31576, 143, 62613, 108, 9688, 930, 43430, 11562, 62613, 304, 108, 11443, 897, 108, 9314, 17415, 63399, 108, 11443, 7614, 18316, 118, 4284, 7148, 12430, 143, 1400, 25703, 158, 111, 4284, 7148, 11772, 143, 21297, 1064, 158, 122, 204, 3506, 1754, 1133, 14787, 1581, 115, 33224, 4482, 111, 1355, 110, 29173, 317, 50833, 108, 20147, 94665, 111, 77198, 107, 1], [110, 62613, 117, 638, 112, 1133, 121, 20098, 1355, 79050, 13872, 135, 1596, 53541, 1352, 141, 13039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 18289, 17780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase__,
            model_name='''google/bigbird-pegasus-large-arxiv''',
            revision='''ba85d0851d708441f91440d509690f1ab6353415''',
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token='''[MASK]''')
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''')

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            '''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
            ''' <pad> <pad> <pad>'''
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ['''This is going to be way too long.''' * 1000, '''short example''']
        tgt_texts = ['''not super long but more than 5 tokens''', '''tiny''']
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors='''pt''')
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors='''pt''')

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            '''This is an example string that is used to test the original TF implementation against the HF'''
            ''' implementation'''
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 428
| 0
|
"""simple docstring"""
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """
    Find the sum of an arithmetic series:
    S = n / 2 * (2 * a + (n - 1) * d)
    """
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
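    # Worked check, assuming sum_of_series above: with a = 1, d = 1, n = 10
    # the formula gives S = 10 / 2 * (2 + 9) = 55.0.
    assert sum_of_series(1, 1, 10) == 55.0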
| 595
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    '''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 426
| 0
|
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
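    # Bitonic sort only handles inputs whose length is a power of two; a
    # fixed-size demonstration (direction 1 sorts ascending, as above):
    demo = [10, 30, 11, 20, 4, 330, 21, 110]
    bitonic_sort(demo, 0, len(demo), 1)
    assert demo == sorted(demo)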
| 700
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class _UpperCamelCase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
'''simple docstring'''
def __init__( self , _a=None , _a=None , **_a ):
"""simple docstring"""
super().__init__(features=_a )
import jax
from jaxlib.xla_client import Device
if isinstance(_a , _a ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(_a )}, as `jaxlib.xla_extension.Device` '''
'is not serializable neither with `pickle` nor with `dill`. Instead you can surround '
'the device with `str()` to get its string identifier that will be internally mapped '
'to the actual `jaxlib.xla_extension.Device`.' )
a__ = device if isinstance(_a , _a ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
a__ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
a__ = str(jax.devices()[0] )
a__ = jnp_array_kwargs
@staticmethod
def lowercase__ ( ):
"""simple docstring"""
import jax
return {str(_a ): device for device in jax.devices()}
def lowercase__ ( self , _a ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_a , _a ) and column:
if all(
isinstance(_a , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_a , axis=0 )
return column
def lowercase__ ( self , _a ):
"""simple docstring"""
import jax
import jax.numpy as jnp
if isinstance(_a , (str, bytes, type(_a )) ):
return value
elif isinstance(_a , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
a__ = {}
if isinstance(_a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
a__ = {'dtype': jnp.intaa}
else:
a__ = {'dtype': jnp.intaa}
elif isinstance(_a , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
a__ = {'dtype': jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_a , PIL.Image.Image ):
a__ = np.asarray(_a )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
a__ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_a , **{**default_dtype, **self.jnp_array_kwargs} )
def lowercase__ ( self , _a ):
"""simple docstring"""
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_a , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_a , '__array__' ) and not isinstance(_a , jax.Array ):
a__ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_a , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_a ) for substruct in data_struct] )
elif isinstance(_a , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_a ) for substruct in data_struct] )
return self._tensorize(_a )
def lowercase__ ( self , _a ):
"""simple docstring"""
return map_nested(self._recursive_tensorize , _a , map_list=_a )
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = self.numpy_arrow_extractor().extract_row(_a )
a__ = self.python_features_decoder.decode_row(_a )
return self.recursive_tensorize(_a )
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = self.numpy_arrow_extractor().extract_column(_a )
a__ = self.python_features_decoder.decode_column(_a , pa_table.column_names[0] )
a__ = self.recursive_tensorize(_a )
a__ = self._consolidate(_a )
return column
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = self.numpy_arrow_extractor().extract_batch(_a )
a__ = self.python_features_decoder.decode_batch(_a )
a__ = self.recursive_tensorize(_a )
for column_name in batch:
a__ = self._consolidate(batch[column_name] )
return batch
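

# A standalone sketch of the integer-precision rule implemented in
# `_tensorize` above (an illustrative helper, not part of the formatter API):
# plain jax follows the same x64 flag when choosing a default integer dtype.
def _default_int_dtype_sketch():
    import jax
    import jax.numpy as jnp

    return jnp.int64 if jax.config.jax_enable_x64 else jnp.int32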
| 126
| 0
|
'''simple docstring'''
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph, vert, visited):
    # DFS from vert, returning vertices in post-order.
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph, vert, visited):
    # Collect all vertices reachable from vert in the reversed graph.
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph):
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
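

if __name__ == "__main__":
    # Illustrative run (the expected grouping is an assumption read off the
    # sample graphs above, not output recorded in the original file):
    # test_graph_2 should yield the two cycles {0, 1, 2} and {3, 4, 5}.
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))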
| 526
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__( lowerCamelCase__ , unittest.TestCase ):
lowercase__ = CodeGenTokenizer
lowercase__ = CodeGenTokenizerFast
lowercase__ = True
lowercase__ = {"""add_prefix_space""": True}
lowercase__ = False
def lowercase_ ( self : List[str] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Any = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
a : Dict = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
a : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a : List[str] = {'unk_token': '<unk>'}
a : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__snake_case ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__snake_case ) )
def lowercase_ ( self : Tuple , **__snake_case : Dict ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizer.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase_ ( self : List[Any] , **__snake_case : List[str] ):
kwargs.update(self.special_tokens_map )
return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **__snake_case )
def lowercase_ ( self : Optional[int] , __snake_case : List[Any] ):
a : Tuple = 'lower newer'
a : str = 'lower newer'
return input_text, output_text
def lowercase_ ( self : str ):
a : List[Any] = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a : Dict = 'lower newer'
a : int = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
a : List[str] = tokenizer.tokenize(__snake_case , add_prefix_space=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
a : Dict = tokens + [tokenizer.unk_token]
a : int = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def lowercase_ ( self : List[Any] ):
if not self.test_rust_tokenizer:
return
a : Optional[int] = self.get_tokenizer()
a : Tuple = self.get_rust_tokenizer(add_prefix_space=__snake_case )
a : List[str] = 'lower newer'
# Testing tokenization
a : Optional[int] = tokenizer.tokenize(__snake_case , add_prefix_space=__snake_case )
a : Optional[Any] = rust_tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing conversion to ids without special tokens
a : Tuple = tokenizer.encode(__snake_case , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
a : str = rust_tokenizer.encode(__snake_case , add_special_tokens=__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing conversion to ids with special tokens
a : Optional[int] = self.get_rust_tokenizer(add_prefix_space=__snake_case )
a : Optional[int] = tokenizer.encode(__snake_case , add_prefix_space=__snake_case )
a : Dict = rust_tokenizer.encode(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
# Testing the unknown token
a : Any = tokens + [rust_tokenizer.unk_token]
a : List[Any] = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def lowercase_ ( self : List[str] , *__snake_case : int , **__snake_case : str ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowercase_ ( self : List[str] , __snake_case : str=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : List[Any] = self.rust_tokenizer_class.from_pretrained(__snake_case , **__snake_case )
# Simple input
a : Dict = 'This is a simple input'
a : int = ['This is a simple input 1', 'This is a simple input 2']
a : Dict = ('This is a simple input', 'This is a pair')
a : Dict = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='max_length' )
# Simple input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='max_length' )
# Simple input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='max_length' , )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode , __snake_case , max_length=__snake_case , padding='max_length' )
# Pair input
self.assertRaises(__snake_case , tokenizer_r.encode_plus , __snake_case , max_length=__snake_case , padding='max_length' )
# Pair input
self.assertRaises(
__snake_case , tokenizer_r.batch_encode_plus , __snake_case , max_length=__snake_case , padding='max_length' , )
def lowercase_ ( self : Optional[Any] ):
a : Any = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
a : Union[str, Any] = 'This is a simple input'
a : Dict = ['This is a simple input looooooooong', 'This is a simple input']
a : int = ('This is a simple input', 'This is a pair')
a : List[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
a : Optional[int] = tokenizer.pad_token_id
a : List[Any] = tokenizer(__snake_case , padding='max_length' , max_length=30 , return_tensors='np' )
a : Union[str, Any] = tokenizer(__snake_case , padding=__snake_case , truncate=__snake_case , return_tensors='np' )
a : Optional[Any] = tokenizer(*__snake_case , padding='max_length' , max_length=60 , return_tensors='np' )
a : List[Any] = tokenizer(__snake_case , padding=__snake_case , truncate=__snake_case , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def lowercase_ ( self : Optional[int] ):
a : int = '$$$'
a : Dict = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=__snake_case , add_bos_token=__snake_case )
a : int = 'This is a simple input'
a : Tuple = ['This is a simple input 1', 'This is a simple input 2']
a : Any = tokenizer.bos_token_id
a : Union[str, Any] = tokenizer(__snake_case )
a : Dict = tokenizer(__snake_case )
self.assertEqual(out_s.input_ids[0] , __snake_case )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
a : Optional[Any] = tokenizer.decode(out_s.input_ids )
a : Optional[Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , __snake_case )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def lowercase_ ( self : List[Any] ):
a : Tuple = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' )
a : Union[str, Any] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'
a : Optional[Any] = '\nif len_a > len_b: result = a\nelse: result = b'
a : Any = tokenizer.encode(__snake_case )
a : Union[str, Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n']
a : List[str] = tokenizer.decode(__snake_case , truncate_before_pattern=__snake_case )
self.assertEqual(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
pass
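

# A minimal sketch of the behaviour exercised by `truncate_before_pattern` in
# the slow test above (the helper below is hypothetical and not CodeGen's
# actual implementation): cut the decoded completion at the earliest match of
# any stop pattern. Relies on the `re` import at the top of this file.
def _truncate_before(text, patterns):
    starts = [m.start() for p in patterns for m in re.finditer(p, text, re.MULTILINE)]
    return text[: min(starts)] if starts else text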
| 526
| 1
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self :Any ) -> Dict:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
a__ = FlaxDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__magic_name__ , cache_dir=__magic_name__ )
a__ = [t[-1] for t in os.walk(os.path.join(__magic_name__ , os.listdir(__magic_name__ )[0] , '''snapshots''' ) )]
a__ = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('''.bin''' ) for f in files )
@slow
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self :str ) -> Dict:
'''simple docstring'''
a__ , a__ = FlaxStableDiffusionPipeline.from_pretrained(
'''hf-internal-testing/tiny-stable-diffusion-pipe''' , safety_checker=__magic_name__ )
a__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
a__ = jax.random.PRNGKey(0 )
a__ = 4
a__ = jax.device_count()
a__ = num_samples * [prompt]
a__ = pipeline.prepare_inputs(__magic_name__ )
# shard inputs and rng
a__ = replicate(__magic_name__ )
a__ = jax.random.split(__magic_name__ , __magic_name__ )
a__ = shard(__magic_name__ )
a__ = pipeline(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , jit=__magic_name__ ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_514_745 ) < 1e-3
assert np.abs(np.abs(__magic_name__ , dtype=np.floataa ).sum() - 49947.875 ) < 5e-1
a__ = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(__magic_name__ ) == num_samples
def _UpperCamelCase ( self :str ) -> Dict:
'''simple docstring'''
a__ , a__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''flax''' , safety_checker=__magic_name__ )
a__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
a__ = jax.random.PRNGKey(0 )
a__ = 50
a__ = jax.device_count()
a__ = num_samples * [prompt]
a__ = pipeline.prepare_inputs(__magic_name__ )
# shard inputs and rng
a__ = replicate(__magic_name__ )
a__ = jax.random.split(__magic_name__ , __magic_name__ )
a__ = shard(__magic_name__ )
a__ = pipeline(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , jit=__magic_name__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_652_401) ) < 1e-3
assert np.abs((np.abs(__magic_name__ , dtype=np.floataa ).sum() - 2383808.2) ) < 5e-1
def _UpperCamelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
a__ , a__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , safety_checker=__magic_name__ )
a__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
a__ = jax.random.PRNGKey(0 )
a__ = 50
a__ = jax.device_count()
a__ = num_samples * [prompt]
a__ = pipeline.prepare_inputs(__magic_name__ )
# shard inputs and rng
a__ = replicate(__magic_name__ )
a__ = jax.random.split(__magic_name__ , __magic_name__ )
a__ = shard(__magic_name__ )
a__ = pipeline(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , jit=__magic_name__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(__magic_name__ , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def _UpperCamelCase ( self :str ) -> List[str]:
'''simple docstring'''
a__ , a__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa )
a__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
a__ = jax.random.PRNGKey(0 )
a__ = 50
a__ = jax.device_count()
a__ = num_samples * [prompt]
a__ = pipeline.prepare_inputs(__magic_name__ )
# shard inputs and rng
a__ = replicate(__magic_name__ )
a__ = jax.random.split(__magic_name__ , __magic_name__ )
a__ = shard(__magic_name__ )
a__ = pipeline(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , jit=__magic_name__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_003_906) ) < 1e-3
assert np.abs((np.abs(__magic_name__ , dtype=np.floataa ).sum() - 2373516.75) ) < 5e-1
def _UpperCamelCase ( self :Optional[int] ) -> int:
'''simple docstring'''
a__ = FlaxDDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , set_alpha_to_one=__magic_name__ , steps_offset=1 , )
a__ , a__ = FlaxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''bf16''' , dtype=jnp.bfloataa , scheduler=__magic_name__ , safety_checker=__magic_name__ , )
a__ = scheduler.create_state()
a__ = scheduler_state
a__ = (
'''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
''' field, close up, split lighting, cinematic'''
)
a__ = jax.random.PRNGKey(0 )
a__ = 50
a__ = jax.device_count()
a__ = num_samples * [prompt]
a__ = pipeline.prepare_inputs(__magic_name__ )
# shard inputs and rng
a__ = replicate(__magic_name__ )
a__ = jax.random.split(__magic_name__ , __magic_name__ )
a__ = shard(__magic_name__ )
a__ = pipeline(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , jit=__magic_name__ ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.045_043_945) ) < 1e-3
assert np.abs((np.abs(__magic_name__ , dtype=np.floataa ).sum() - 2347693.5) ) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            '''A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'''
            ''' field, close up, split lighting, cinematic'''
        )
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloat16, safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)

        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''', revision='''bf16''', dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)

        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
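

# A standalone sketch (hypothetical helper) of the replicate/shard pattern the
# tests above rely on: `shard` splits the leading batch axis across devices,
# which is why images come back shaped (num_devices, batch_per_device, H, W, C).
# Assumes flax is available, matching the guarded imports at the top.
def _shard_demo() -> None:
    batch = np.arange(jax.device_count() * 2).reshape(jax.device_count() * 2, 1)
    sharded = shard(batch)
    assert sharded.shape == (jax.device_count(), 2, 1)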
| 713
|
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
__lowerCAmelCase : List[str] = '''0.12''' # assumed parallelism: 8
@require_flax
@is_staging_test
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _UpperCamelCase ( cls :Any ) -> List[str]:
'''simple docstring'''
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
def _UpperCamelCase ( cls :Any ) -> Optional[Any]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id='''test-model-flax''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-model-flax-org''' )
except HTTPError:
pass
def _UpperCamelCase ( self :List[str] ) -> Tuple:
'''simple docstring'''
a__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
a__ = FlaxBertModel(__magic_name__ )
model.push_to_hub('''test-model-flax''' , use_auth_token=self._token )
a__ = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
a__ = flatten_dict(unfreeze(model.params ) )
a__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
a__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''test-model-flax''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__magic_name__ , repo_id='''test-model-flax''' , push_to_hub=__magic_name__ , use_auth_token=self._token )
a__ = FlaxBertModel.from_pretrained(F"{USER}/test-model-flax" )
a__ = flatten_dict(unfreeze(model.params ) )
a__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
a__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F"{key} not identical" )
def _UpperCamelCase ( self :str ) -> Any:
'''simple docstring'''
a__ = BertConfig(
vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
a__ = FlaxBertModel(__magic_name__ )
model.push_to_hub('''valid_org/test-model-flax-org''' , use_auth_token=self._token )
a__ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
a__ = flatten_dict(unfreeze(model.params ) )
a__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
a__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F"{key} not identical" )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-model-flax-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(
__magic_name__ , repo_id='''valid_org/test-model-flax-org''' , push_to_hub=__magic_name__ , use_auth_token=self._token )
a__ = FlaxBertModel.from_pretrained('''valid_org/test-model-flax-org''' )
a__ = flatten_dict(unfreeze(model.params ) )
a__ = flatten_dict(unfreeze(new_model.params ) )
for key in base_params.keys():
a__ = (base_params[key] - new_params[key]).sum().item()
self.assertLessEqual(__magic_name__ , 1e-3 , msg=F"{key} not identical" )
def check_models_equal(model_1, model_2) -> bool:
    models_are_equal = True
    flat_params_1 = flatten_dict(model_1.params)
    flat_params_2 = flatten_dict(model_2.params)
    for key in flat_params_1.keys():
        if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
a__ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
a__ = FlaxBertModel(__magic_name__ )
a__ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
a__ = FlaxBertModel.from_pretrained(__magic_name__ )
a__ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def _UpperCamelCase ( self :List[Any] ) -> Dict:
'''simple docstring'''
a__ = BertConfig.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
a__ = FlaxBertModel(__magic_name__ )
a__ = '''bert'''
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(os.path.join(__magic_name__ , __magic_name__ ) , max_shard_size='''10KB''' )
with self.assertRaises(__magic_name__ ):
a__ = FlaxBertModel.from_pretrained(__magic_name__ )
a__ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertTrue(check_models_equal(__magic_name__ , __magic_name__ ) )
def _UpperCamelCase ( self :List[str] ) -> Union[str, Any]:
'''simple docstring'''
a__ = '''bert'''
a__ = '''hf-internal-testing/tiny-random-bert-subfolder'''
with self.assertRaises(__magic_name__ ):
a__ = FlaxBertModel.from_pretrained(__magic_name__ )
a__ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def _UpperCamelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
a__ = '''bert'''
a__ = '''hf-internal-testing/tiny-random-bert-sharded-subfolder'''
with self.assertRaises(__magic_name__ ):
a__ = FlaxBertModel.from_pretrained(__magic_name__ )
a__ = FlaxBertModel.from_pretrained(__magic_name__ , subfolder=__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 158
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__snake_case : List[str] = {
'configuration_efficientformer': [
'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientFormerConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientFormerForImageClassification',
'EfficientFormerForImageClassificationWithTeacher',
'EfficientFormerModel',
'EfficientFormerPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] = [
'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFEfficientFormerForImageClassification',
'TFEfficientFormerForImageClassificationWithTeacher',
'TFEfficientFormerModel',
'TFEfficientFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__snake_case : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 571
|
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is a node that has not been iterated.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
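

# A small self-check sketch (hypothetical helper). The expected values are an
# assumption based on the classic CLRS flow network that the capacity matrix
# above matches: max flow 23, with saturated cut edges (1,3), (4,3), (4,5).
# A copy is passed in because mincut mutates its graph argument.
def _mincut_demo() -> None:
    cut = mincut([row[:] for row in test_graph], source=0, sink=5)
    assert sorted(cut) == [(1, 3), (4, 3), (4, 5)]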
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 571
| 1
|
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
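

# An alternative closed-form sketch using Binet's formula (assumption: the
# standard identity digits(F(k)) = floor(k*log10(phi) - log10(sqrt(5))) + 1;
# this helper is illustrative and not part of the original solution).
import math


def solution_closed_form(n: int = 1000) -> int:
    phi = (1 + math.sqrt(5)) / 2
    return math.ceil((n - 1 + math.log10(5) / 2) / math.log10(phi))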
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 682
|
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
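

# An equivalent bitwise sketch (illustrative alternative; assumes inputs are
# restricted to 0/1): XNOR is XOR followed by inverting the low bit.
def xnor_gate_bitwise(input_1: int, input_2: int) -> int:
    return (input_1 ^ input_2) ^ 1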
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682
| 1
|
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64, metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30, metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20, metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0, metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1, metadata={'help': 'multiple threads for converting example to features'})


class Split(Enum):
    train = 'train'
    dev = 'dev'


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(self, args, tokenizer, limit_length=None, mode=Split.train, is_language_sensitive=False, cache_dir=None, dataset_format="pt"):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = """v2""" if args.version_2_with_negative else """v1"""
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["""features"""]
                self.dataset = self.old_features.get("""dataset""", None)
                self.examples = self.old_features.get("""examples""", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        """ future run""")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
def __len__( self ):
return len(self.features )
    def __getitem__(self, i):
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": attention_mask,
            """token_type_ids""": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"""is_impossible""": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"""langs""": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions})
        return inputs
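

# A minimal sketch of the doc_stride windowing that
# `squad_convert_examples_to_features` performs over long contexts (the helper
# and the overlap interpretation below are illustrative assumptions, not the
# actual implementation).
def _window_spans(n_tokens: int, max_len: int, doc_stride: int) -> list:
    assert doc_stride < max_len  # otherwise the window never advances
    spans, start = [], 0
    while True:
        end = min(start + max_len, n_tokens)
        spans.append((start, end))
        if end >= n_tokens:
            return spans
        start += max_len - doc_stride  # each window overlaps the last by doc_stride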
| 79
|
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Optional[int] = DebertaTokenizer
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Any = DebertaTokenizerFast
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
lowerCamelCase_ = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
lowerCamelCase_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase_ = {'unk_token': '[UNK]'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__SCREAMING_SNAKE_CASE ) )
def UpperCamelCase ( self : Tuple , **__SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Any ) -> Dict:
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = 'lower newer'
return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = 'lower newer'
        bpe_tokens = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer('Hello', 'World')
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd['token_type_ids'], expected_token_type_ids)
@slow
def UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode(
'sequence builders' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
lowerCamelCase_ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase_ = tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowerCamelCase_ = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
lowerCamelCase_ = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['input_ids']]
# fmt: off
lowerCamelCase_ = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase_ = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
| 549
| 0
|
from typing import Any
class Node:
    def __init__(self, data):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f'''Node({self.data})'''


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index, data) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index=0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))
    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))
    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True
    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True
    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2():
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.2,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f'''Element at Position 1: {linked_list[1]}''')
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f'''length of linked_list is : {len(linked_list)}''')
if __name__ == "__main__":
main()
| 114
|
def method_2(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_2(boundary, steps)
    print(f'''y = {y}''')
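

# A quick sanity sketch (hypothetical helper, not part of the original file):
# the exact integral of x**2 over [0, 1] is 1/3. Note that make_points stops
# strictly before b - h, so the estimate omits one interior point and
# undershoots the true value.
def _trapezoid_demo() -> None:
    print(method_2([0.0, 1.0], 10.0), "vs exact", 1.0 / 3.0)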
if __name__ == "__main__":
main()
| 114
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(self, batch_size=1, num_inference_steps=2000, generator=None, output_type="pil", return_dict=True, **kwargs):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
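

# Illustrative usage sketch (hypothetical helper; the checkpoint id is an
# example, not a value taken from this file, and calling it downloads weights).
def _score_sde_ve_demo():
    pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
    return pipe(num_inference_steps=2000).images[0]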
| 32
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """simple docstring"""
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    local_sgd_steps = int(args.local_sgd_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps)
    if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
        raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)")
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs), )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        with LocalSGD(
            accelerator=accelerator, model=model, local_sgd_steps=local_sgd_steps, enabled=local_sgd_steps is not None) as local_sgd:
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                # New code #
                # We use the new `accumulate` context manager to perform gradient accumulation
                # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
                with accelerator.accumulate(model):
                    output = model(**batch)
                    loss = output.loss
                    accelerator.backward(loss)
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                    # LocalSGD-specific line
                    local_sgd.step()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
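# How LocalSGD interacts with the training loop above: between
# synchronizations each worker takes `local_sgd_steps` independent optimizer
# steps; every `local_sgd.step()` call advances that counter and, on the K-th
# step, averages model parameters across workers. Gradient accumulation
# happens inside `accelerator.accumulate(...)`, so the two mechanisms compose.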
def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=1, help="The number of minibatches to be ran before gradients are accumulated.", )
    parser.add_argument(
        "--local_sgd_steps", type=int, default=8, help="Number of local SGD steps or None to disable local SGD")
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 386
| 0
|
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        # split the comma-separated input into a list of number strings
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane's algorithm, with an explicit running-maximum (rear) array
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
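# Example: for the input string "1,-2,3,4" the maximum-sum contiguous
# subarray is [3, 4], so solve_sub_array() returns 7.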
if __name__ == "__main__":
whole_array = input('''please input some numbers:''')
array = SubArray(whole_array)
re = array.solve_sub_array()
print(('''the results is:''', re))
| 712
|
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
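# Context for the tests below: during generation, the decoding loop calls each
# StoppingCriteria with the current input_ids and scores after every step and
# halts as soon as one returns True; the criteria here cover sequence length,
# newly generated token count, and wall-clock time.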
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ])

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
| 31
| 0
|
UNIT_SYMBOL = {
"""meter""": """m""",
"""kilometer""": """km""",
"""megametre""": """Mm""",
"""gigametre""": """Gm""",
"""terametre""": """Tm""",
"""petametre""": """Pm""",
"""exametre""": """Em""",
"""zettametre""": """Zm""",
"""yottametre""": """Ym""",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"""m""": 0,
"""km""": 3,
"""Mm""": 6,
"""Gm""": 9,
"""Tm""": 12,
"""Pm""": 15,
"""Em""": 18,
"""Zm""": 21,
"""Ym""": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    '''simple docstring'''
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
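# Worked example: length_conversion(4, "metre", "kilometre") sanitizes the
# unit names to "m" and "km", computes exponent = 0 - 3 = -3, and returns
# 4 * 10**-3 = 0.004.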
if __name__ == "__main__":
from doctest import testmod
testmod()
| 385
|
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError('''String must only contain alphabetic characters.''')

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
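# Examples: is_isogram("Uncopyrightable") -> True (no letter repeats), while
# is_isogram("allowance") -> False ("a" and "l" repeat).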
if __name__ == "__main__":
a_ = input("""Enter a string """).strip()
a_ = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 175
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple('CoinsDistribResult', 'moves excess')


def distribute_coins(root: TreeNode | None) -> int:
    """simple docstring"""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0

        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
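# Example: distribute_coins(TreeNode(0, TreeNode(3), TreeNode(0))) == 3:
# two moves carry the left leaf's surplus up to the root, and one more move
# forwards a coin from the root to the right leaf.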
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'
),
'google/realm-cc-news-pretrained-openqa': (
        'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = """realm"""

    def __init__(self, vocab_size=30_522, hidden_size=768, retriever_proj_size=128, num_hidden_layers=12, num_attention_heads=12, num_candidates=8, intermediate_size=3_072, hidden_act="gelu_new", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, span_hidden_size=256, max_span_width=10, reader_layer_norm_eps=1e-3, reader_beam_size=5, reader_seq_len=320, num_block_records=13_353_718, searcher_beam_size=5_000, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
| 210
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    """simple docstring"""

    model_type = '''gpt_neox_japanese'''

    def __init__(self, vocab_size=32_000, hidden_size=2_560, num_hidden_layers=32, num_attention_heads=32, intermediate_multiple_size=4, hidden_act="gelu", rotary_pct=1.00, rotary_emb_base=10_000, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=31_996, eos_token_id=31_999, attention_dropout=0.1, hidden_dropout=0.0, **kwargs, ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
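# Note: this config expresses the feed-forward width as a multiplier
# (intermediate_multiple_size) instead of an absolute intermediate_size; the
# MLP dimension works out to hidden_size * intermediate_multiple_size.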
| 536
|
'''simple docstring'''
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def git_log(folder_path: str):
    """simple docstring"""
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        'repo_id': str(repo),
        'repo_sha': str(repo.head.object.hexsha),
        'repo_branch': str(repo.active_branch),
    }

    with open(os.path.join(folder_path, 'git_log.json'), 'w') as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    """simple docstring"""
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info('Initializing GPUs')
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ['WORLD_SIZE'])
        params.n_gpu_per_node = int(os.environ['N_GPU_NODE'])
        params.global_rank = int(os.environ['RANK'])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ['N_NODES'])
        assert params.node_id == int(os.environ['NODE_RANK'])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f'--- Global rank: {params.global_rank} - '
    logger.info(PREFIX + 'Number of nodes: %i' % params.n_nodes)
    logger.info(PREFIX + 'Node ID        : %i' % params.node_id)
    logger.info(PREFIX + 'Local rank     : %i' % params.local_rank)
    logger.info(PREFIX + 'World size     : %i' % params.world_size)
    logger.info(PREFIX + 'GPUs per node  : %i' % params.n_gpu_per_node)
    logger.info(PREFIX + 'Master         : %s' % str(params.is_master))
    logger.info(PREFIX + 'Multi-node     : %s' % str(params.multi_node))
    logger.info(PREFIX + 'Multi-GPU      : %s' % str(params.multi_gpu))
    logger.info(PREFIX + 'Hostname       : %s' % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info('Initializing PyTorch distributed')
        torch.distributed.init_process_group(
            init_method='env://', backend='nccl', )
def set_seed(args):
    """simple docstring"""
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
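# Expected environment for the multi-GPU path above (normally exported by the
# launcher, e.g. torch.distributed.launch): WORLD_SIZE, N_GPU_NODE, RANK,
# N_NODES and NODE_RANK. Single-process jobs skip all of it with local_rank == -1.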
| 536
| 1
|
from __future__ import annotations
def ceil_index(v, l, r, key) -> int:  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
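# Example: for v = [2, 5, 3, 7, 11, 8, 10, 13, 6] the longest strictly
# increasing subsequence has length 6 (e.g. 2, 3, 7, 8, 10, 13).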
if __name__ == "__main__":
import doctest
doctest.testmod()
| 709
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
lowerCAmelCase_ = ["""gpt2"""]
lowerCAmelCase_ = """gpt2"""
if is_tf_available():
    class ModelToSave(tf.Module):
        '''simple docstring'''

        def __init__(self, tokenizer):
            '''simple docstring'''
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name='text'),))
        def serving(self, text):
            '''simple docstring'''
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized['input_ids'].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)['logits']

            return outputs
@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        '''simple docstring'''
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            'This is a straightforward English test sentence.',
            'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
            'Now we\'re going to add some Chinese: 一 二 三 一二三',
            'And some much more rare Chinese: 齉 堃 齉堃',
            'Je vais aussi écrire en français pour tester les accents',
            'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        '''simple docstring'''
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors='tf')
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / 'saved.model'
                tf.saved_model.save(model, save_path, signatures={'serving_default': model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures['serving_default'](test_inputs)['output_0']
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        '''simple docstring'''
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123_123

            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out['input_ids'].numpy().shape[1]

                assert out_length == max_length
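# Why these tests exist: TFGPT2Tokenizer is an in-graph tokenizer (backed by
# keras-nlp's byte-pair tokenizer), so tokenization can be traced with
# tf.function and exported inside a SavedModel; each test compares it against
# the reference Python GPT2Tokenizer.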
| 669
| 0
|
def infix_2_postfix(infix):
    '''simple docstring'''
    stack = []
    post_fix = []
    priority = {
        '^': 3,
        '*': 2,
        '/': 2,
        '%': 2,
        '+': 1,
        '-': 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        'Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep=' | ', )
    print('-' * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack

        print(
            x.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            ' '.center(8), (''.join(stack)).ljust(print_width), (''.join(post_fix)).ljust(print_width), sep=' | ', )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    '''simple docstring'''
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ')'  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = '('  # change ")" to "("

    return (infix_2_postfix(''.join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix
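# Example: for the infix expression "a+b*c" the postfix form is "abc*+" and
# infix_2_prefix returns "+a*bc" (reverse, swap parentheses, convert, reverse).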
if __name__ == "__main__":
Infix = input('\nEnter an Infix Equation = ')  # Input an Infix equation
Infix = ''.join(Infix.split())  # Remove spaces from the input
print('\n\t', Infix, '(Infix) -> ', infix_2_prefix(Infix), '(Prefix)')
| 464
|
from timeit import timeit
test_data = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def is_palindrome(s: str) -> bool:
    '''simple docstring'''
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True
def is_palindrome_traversal(s: str) -> bool:
    '''simple docstring'''
    end = len(s) // 2
    n = len(s)

    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))
def is_palindrome_recursive(s: str) -> bool:
    '''simple docstring'''
    if len(s) <= 1:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False
def is_palindrome_slice(s: str) -> bool:
    '''simple docstring'''
    return s == s[::-1]
def benchmark_function(name: str) -> None:
    '''simple docstring'''
    stmt = f'''all({name}(key) is value for key, value in test_data.items())'''
    setup = f'''from __main__ import test_data, {name}'''
    number = 500_000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f'''{name:<35} finished {number:,} runs in {result:.5f} seconds''')
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"""{key:21} {value}""")
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
| 464
| 1
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.encoder.norm.weight', 'encoder.layernorm.weight'),
('transformer.encoder.norm.bias', 'encoder.layernorm.bias'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ''''''

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
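# Note on the slicing above: 256 is the model's hidden size, so each fused
# in_proj matrix of shape (3 * 256, 256) splits into query / key / value
# blocks of 256 rows each.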
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if '''detection''' in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info('''Converting model...''')

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='''cpu''')

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''model.'''
    for key in state_dict.copy().keys():
        if not key.startswith('''class_labels_classifier''') and not key.startswith('''bbox_predictor'''):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='''resnet18''', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: '''table''', 1: '''table rotated'''}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: '''table''',
            1: '''table column''',
            2: '''table row''',
            3: '''table column header''',
            4: '''table projected row header''',
            5: '''table spanning cell''',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format='''coco_detection''', max_size=800 if '''detection''' in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = '''example_pdf.png''' if '''detection''' in checkpoint_url else '''example_table.png'''
    file_path = hf_hub_download(repo_id='''nielsr/example-pdf''', repo_type='''dataset''', filename=filename)
    image = Image.open(file_path).convert('''RGB''')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])

    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info('''Pushing model to the hub...''')
        model_name = (
            '''microsoft/table-transformer-detection'''
            if '''detection''' in checkpoint_url
            else '''microsoft/table-transformer-structure-recognition'''
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
type=str,
choices=[
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth',
'https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth',
],
help='URL of the Table Transformer checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 613
|
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ['''integration''', '''unit''']):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line('''markers''', '''torchaudio_latest: mark test to run with torchaudio>=0.12''')


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / '''cache'''
    test_hf_datasets_cache = test_hf_cache_home / '''datasets'''
    test_hf_metrics_cache = test_hf_cache_home / '''metrics'''
    test_hf_modules_cache = test_hf_cache_home / '''modules'''
    monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''', str(test_hf_datasets_cache))
    monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''', str(test_hf_metrics_cache))
    monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''', str(test_hf_modules_cache))

    test_downloaded_datasets_path = test_hf_datasets_cache / '''downloads'''
    monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''', str(test_downloaded_datasets_path))

    test_extracted_datasets_path = test_hf_datasets_cache / '''downloads''' / '''extracted'''
    monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope='''session''')
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''', False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''', True)
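# Note: the autouse fixtures above run for every test automatically; in
# particular, the cache fixture redirects all datasets cache paths into the
# pytest temp directory so tests never touch the user's real HF cache.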
| 613
| 1
|
def binary_xor(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")

    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"

    max_len = max(len(a_binary), len(b_binary))

    return "0b" + "".join(
        str(int(char_a != char_b))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
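# Example: binary_xor(25, 32) -> "0b111001" (011001 XOR 100000, after
# zero-padding both operands to the same width).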
if __name__ == "__main__":
import doctest
doctest.testmod()
| 15
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        """simple docstring"""
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        """simple docstring"""
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]['copies'], 2)
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'], True)
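# The 0.85 passed to make_duplicate_clusters is the MinHash Jaccard threshold:
# 'a ' * 20 and 'a ' * 30 are similar enough to land in one cluster, while
# 'b ' * 7 stays out, which is what the assertions check.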
| 235
| 0
|
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.verticies_count = len(graph)
        self.maximum_flow_algorithm = None

    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1

    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.getMaximumFlow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)


class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.verticies_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    def _algorithm(self):
        pass


class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def getMaximumFlow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")

        return self.maximum_flow


class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count

    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index], self.graph[from_index][to_index] - self.preflow[from_index][to_index], )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
entrances = [0]
exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
flow_network = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
maximum_flow = flow_network.find_maximum_flow()
print(f"""maximum flow is {maximum_flow}""")
| 714
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    '''simple docstring'''
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
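# Example: maximum_non_adjacent_sum([1, 2, 3]) == 4 (pick 1 and 3), and
# maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6]) == 18 (pick 5, 7 and 6).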
if __name__ == "__main__":
import doctest
doctest.testmod()
| 205
| 0
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = """wav2vec2"""
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul , self.conv_stride , 1 )
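# A minimal sketch of how the configuration above is typically used (the values
# shown are just the defaults restated, not new behaviour):
#
#     config = Wav2Vec2Config(vocab_size=32, hidden_size=768)
#     config.inputs_to_logits_ratio   # 320 == product of the conv strides 5*2*2*2*2*2*2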
| 247
|
"""simple docstring"""
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input('''Enter a string ''').strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {'an' if isogram else 'not an'} isogram.''')
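# A couple of illustrative calls (a sketch; the results follow directly from
# the definition above):
#
#     is_isogram("Uncopyrightable")   # True  - no letter repeats
#     is_isogram("allowance")         # False - 'a' and 'l' repeat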
| 247
| 1
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
| 706
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 518
| 0
|
'''simple docstring'''
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 90
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    """Configuration class to store the configuration of a BioGPT model."""

    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42_384,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
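# A minimal instantiation sketch for the configuration above (the arguments
# shown merely restate the defaults):
#
#     config = BioGptConfig(vocab_size=42_384, hidden_size=1_024)
#     config.num_hidden_layers   # 24 by default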
| 582
| 0
|
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
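# Illustrative calls for the two helpers above (a sketch, not a test suite):
#
#     is_prime(29)     # True
#     is_prime(30)     # False
#     next_prime(14)   # 17, the first prime strictly above 14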
| 608
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
| 608
| 1
|
class Node:
    def __init__(self, data, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self):
        return f"{self.data}"

    def get_data(self):
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous
class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node, node_to_insert) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node, node_to_insert) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position, value) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("Node not found")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()

            if node == self.tail:
                self.tail = self.tail.get_previous()

            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node) -> None:
        if node.get_next():
            node.next.previous = node.previous

        if node.get_previous():
            node.previous.next = node.next

        node.next = None
        node.previous = None

    def is_empty(self):
        return self.head is None


def create_linked_list() -> None:
    """Placeholder for doctest examples exercising LinkedList."""
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
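# A short usage sketch for the doubly linked list above (method names follow
# the cleaned-up class in this file; printed output follows from __str__):
#
#     linked_list = LinkedList()
#     linked_list.insert(1)
#     linked_list.insert(2)
#     linked_list.insert(3)
#     str(linked_list)            # "1 2 3"
#     linked_list.delete_value(2)
#     str(linked_list)            # "1 3"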
| 257
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig()

    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residuals = True
        config.num_labels = 150
        repo_id = """huggingface/label-files"""
        filename = """ade20k-id2label.json"""
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="""dataset""")), """r"""))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("""pretrained.model""", """dpt.encoder""")
    if "pretrained.model" in name:
        name = name.replace("""pretrained.model""", """dpt.embeddings""")
    if "patch_embed" in name:
        name = name.replace("""patch_embed""", """patch_embeddings""")
    if "pos_embed" in name:
        name = name.replace("""pos_embed""", """position_embeddings""")
    if "attn.proj" in name:
        name = name.replace("""attn.proj""", """attention.output.dense""")
    if "proj" in name and "project" not in name:
        name = name.replace("""proj""", """projection""")
    if "blocks" in name:
        name = name.replace("""blocks""", """layer""")
    if "mlp.fc1" in name:
        name = name.replace("""mlp.fc1""", """intermediate.dense""")
    if "mlp.fc2" in name:
        name = name.replace("""mlp.fc2""", """output.dense""")
    if "norm1" in name:
        name = name.replace("""norm1""", """layernorm_before""")
    if "norm2" in name:
        name = name.replace("""norm2""", """layernorm_after""")
    if "scratch.output_conv" in name:
        name = name.replace("""scratch.output_conv""", """head""")
    if "scratch" in name:
        name = name.replace("""scratch""", """neck""")
    if "layer1_rn" in name:
        name = name.replace("""layer1_rn""", """convs.0""")
    if "layer2_rn" in name:
        name = name.replace("""layer2_rn""", """convs.1""")
    if "layer3_rn" in name:
        name = name.replace("""layer3_rn""", """convs.2""")
    if "layer4_rn" in name:
        name = name.replace("""layer4_rn""", """convs.3""")
    if "refinenet" in name:
        layer_idx = int(name[len("""neck.refinenet""") : len("""neck.refinenet""") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(F"""refinenet{layer_idx}""", F"""fusion_stage.layers.{abs(layer_idx-4)}""")
    if "out_conv" in name:
        name = name.replace("""out_conv""", """projection""")
    if "resConfUnit1" in name:
        name = name.replace("""resConfUnit1""", """residual_layer1""")
    if "resConfUnit2" in name:
        name = name.replace("""resConfUnit2""", """residual_layer2""")
    if "conv1" in name:
        name = name.replace("""conv1""", """convolution1""")
    if "conv2" in name:
        name = name.replace("""conv2""", """convolution2""")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess1.0.project.0""", """neck.reassemble_stage.readout_projects.0.0""")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess2.0.project.0""", """neck.reassemble_stage.readout_projects.1.0""")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess3.0.project.0""", """neck.reassemble_stage.readout_projects.2.0""")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("""pretrained.act_postprocess4.0.project.0""", """neck.reassemble_stage.readout_projects.3.0""")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("""pretrained.act_postprocess1.3""", """neck.reassemble_stage.layers.0.projection""")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("""pretrained.act_postprocess1.4""", """neck.reassemble_stage.layers.0.resize""")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("""pretrained.act_postprocess2.3""", """neck.reassemble_stage.layers.1.projection""")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("""pretrained.act_postprocess2.4""", """neck.reassemble_stage.layers.1.resize""")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("""pretrained.act_postprocess3.3""", """neck.reassemble_stage.layers.2.projection""")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("""pretrained.act_postprocess4.3""", """neck.reassemble_stage.layers.3.projection""")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("""pretrained.act_postprocess4.4""", """neck.reassemble_stage.layers.3.resize""")
    if "pretrained" in name:
        name = name.replace("""pretrained""", """dpt""")
    if "bn" in name:
        name = name.replace("""bn""", """batch_norm""")
    if "head" in name:
        name = name.replace("""head""", """head.head""")
    if "encoder.norm" in name:
        name = name.replace("""encoder.norm""", """layernorm""")
    if "auxlayer" in name:
        name = name.replace("""auxlayer""", """auxiliary_head.head""")
    return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.weight""")
        in_proj_bias = state_dict.pop(F"""dpt.encoder.layer.{i}.attn.qkv.bias""")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[: config.hidden_size, :]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""dpt.encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)

    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if """ade""" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Check outputs on an image
    size = 480 if """ade""" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)

    image = prepare_img()
    encoding = image_processor(image, return_tensors="""pt""")

    # forward pass
    outputs = model(**encoding).logits if """ade""" in checkpoint_url else model(**encoding).predicted_depth

    # Assert logits
    expected_slice = torch.tensor([[6.3_199, 6.3_629, 6.4_148], [6.3_850, 6.3_615, 6.4_166], [6.3_519, 6.3_176, 6.3_575]])
    if "ade" in checkpoint_url:
        expected_slice = torch.tensor([[4.0_480, 4.2_420, 4.4_360], [4.3_124, 4.5_693, 4.8_261], [4.5_768, 4.8_965, 5.2_163]])
    assert outputs.shape == torch.Size(expected_shape)
    assert (
        torch.allclose(outputs[0, 0, :3, :3], expected_slice, atol=1E-4)
        if "ade" in checkpoint_url
        else torch.allclose(outputs[0, :3, :3], expected_slice)
    )

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
    print(F"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("""Pushing model to hub...""")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add model""", use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="""nielsr""", commit_message="""Add image processor""", use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
type=str,
help="URL of the original DPT checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
parser.add_argument(
"--model_name",
default="dpt-large",
type=str,
help="Name of the model, in case you're pushing to the hub.",
)
    args = parser.parse_args()
convert_dpt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 180
| 0
|
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
            """unet/diffusion_pytorch_model.bin""",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            """safety_checker/pytorch_model.bin""",
            """safety_checker/model.safetensors""",
            """vae/diffusion_pytorch_model.bin""",
            """vae/diffusion_pytorch_model.safetensors""",
            """text_encoder/pytorch_model.bin""",
            # Removed: 'text_encoder/model.safetensors',
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            """safety_checker/pytorch_model.fp16.bin""",
            """safety_checker/model.fp16.safetensors""",
            """vae/diffusion_pytorch_model.fp16.bin""",
            """vae/diffusion_pytorch_model.fp16.safetensors""",
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
            """unet/diffusion_pytorch_model.fp16.bin""",
            """unet/diffusion_pytorch_model.fp16.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            """unet/diffusion_pytorch_model.fp16.bin""",
            """unet/diffusion_pytorch_model.fp16.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            """unet/diffusion_pytorch_model.bin""",
            """unet/diffusion_pytorch_model.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            """safety_checker/pytorch_model.fp16.bin""",
            """safety_checker/model.fp16.safetensors""",
            """vae/diffusion_pytorch_model.fp16.bin""",
            """vae/diffusion_pytorch_model.fp16.safetensors""",
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
            """unet/diffusion_pytorch_model.fp16.bin""",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = """fp16"""
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            """text_encoder/pytorch_model.fp16.bin""",
            """text_encoder/model.fp16.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            """text_encoder/pytorch_model.bin""",
            """text_encoder/model.safetensors""",
        ]
        variant = """fp16"""
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            """safety_checker/pytorch_model.fp16.bin""",
            """safety_checker/model.fp16.safetensors""",
            """vae/diffusion_pytorch_model.fp16.bin""",
            """vae/diffusion_pytorch_model.fp16.safetensors""",
            """text_encoder/pytorch_model.fp16.bin""",
            # 'text_encoder/model.fp16.safetensors',
            """unet/diffusion_pytorch_model.fp16.bin""",
            """unet/diffusion_pytorch_model.fp16.safetensors""",
        ]
        variant = """fp16"""
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
| 127
|
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
    'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}


class EncodecConfig(PretrainedConfig):
    """Configuration class to store the configuration of an EnCodec model."""

    model_type = 'encodec'

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1_024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1_000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
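# A minimal sketch of the derived properties above, using the default 24 kHz
# setup (values follow arithmetically from the defaults):
#
#     config = EncodecConfig()
#     config.frame_rate       # ceil(24_000 / (8 * 5 * 4 * 2)) == 75
#     config.num_quantizers   # 1_000 * 24.0 // (75 * 10) == 32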
| 127
| 1
|
"""simple docstring"""
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 224
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.0_2,
        sigma_max: float = 100,
        s_noise: float = 1.0_0_7,
        s_churn: float = 80,
        s_min: float = 0.0_5,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state: KarrasVeSchedulerState, num_inference_steps: int, shape: Tuple = ()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(
        self,
        state: KarrasVeSchedulerState,
        sample: jnp.ndarray,
        sigma: float,
        key: random.KeyArray,
    ):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(
        self,
        state: KarrasVeSchedulerState,
        model_output: jnp.ndarray,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: jnp.ndarray,
        sample_prev: jnp.ndarray,
        derivative: jnp.ndarray,
        return_dict: bool = True,
    ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)

        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state: KarrasVeSchedulerState, original_samples, noise, timesteps):
        raise NotImplementedError()
| 224
| 1
|
'''simple docstring'''
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz'''), ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? '''), ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo'''), ['''hello'''])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo'''), ['''h\u00E9llo'''])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo'''), ['''hello'''])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''])
        self.assertListEqual(tokenizer.tokenize('''H\u00E9llo'''), ['''hello'''])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? '''), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? '''), ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['''[UNK]'''])
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]'''), ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='''[UNK]''')

        self.assertListEqual(tokenizer.tokenize(''''''), [])
        self.assertListEqual(tokenizer.tokenize('''unwanted running'''), ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.tokenize('''unwantedX running'''), ['''[UNK]''', '''runn''', '''##ing'''])

    @require_torch
    def test_prophetnet_batch_encoding(self):
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''')
        src_text = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        expected_src_tokens = [1_037, 2_146, 20_423, 2_005, 7_680, 7_849, 3_989, 1_012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors='''pt''')
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(''' '''))
        self.assertTrue(_is_whitespace('''\t'''))
        self.assertTrue(_is_whitespace('''\r'''))
        self.assertTrue(_is_whitespace('''\n'''))
        self.assertTrue(_is_whitespace('''\u00A0'''))

        self.assertFalse(_is_whitespace('''A'''))
        self.assertFalse(_is_whitespace('''-'''))

    def test_is_control(self):
        self.assertTrue(_is_control('''\u0005'''))

        self.assertFalse(_is_control('''A'''))
        self.assertFalse(_is_control(''' '''))
        self.assertFalse(_is_control('''\t'''))
        self.assertFalse(_is_control('''\r'''))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('''-'''))
        self.assertTrue(_is_punctuation('''$'''))
        self.assertTrue(_is_punctuation('''`'''))
        self.assertTrue(_is_punctuation('''.'''))

        self.assertFalse(_is_punctuation('''A'''))
        self.assertFalse(_is_punctuation(''' '''))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''')
        text = tokenizer.encode('''sequence builders''', add_special_tokens=False)
        text_2 = tokenizer.encode('''multi-sequence build''', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 537
|
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Search ``item`` in an ascending ``sorted_collection`` and return its index."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError('''Collection must be ascending sorted''')
    return True


if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("""Sequence must be ascending sorted to apply interpolation search""")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f'''{target} found at position: {result}''')
    else:
        print("""Not found""")
| 537
| 1
|
'''simple docstring'''
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1_0_0_0)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(F"""Total sum count for state is: {quantum_full_adder(1, 1, 1)}""")
| 195
|
'''simple docstring'''
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    """Kwargs will be passed to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
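# Example invocation through python-fire (the file names are illustrative;
# fire maps positional arguments onto pred_path and tgt_path):
#
#     python rouge_cli.py predictions.txt references.txt --save_path=rouge.json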
| 212
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 489
|
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    """Return the median of the merged, sorted contents of ``nums1`` and ``nums2``."""
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f'''The median of two arrays is: {median_of_two_arrays(array_1, array_2)}''')
| 489
| 1
|
"""simple docstring"""
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
        hf_down_res_prefix = f'''down_blocks.{i}.resnets.{j}.'''
        sd_down_res_prefix = f'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
            hf_down_atn_prefix = f'''down_blocks.{i}.attentions.{j}.'''
            sd_down_atn_prefix = f'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
        hf_up_res_prefix = f'''up_blocks.{i}.resnets.{j}.'''
        sd_up_res_prefix = f'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
            hf_up_atn_prefix = f'''up_blocks.{i}.attentions.{j}.'''
            sd_up_atn_prefix = f'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.conv.'''
        sd_downsample_prefix = f'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{j}.'''
    sd_mid_res_prefix = f'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    '''Convert HF Diffusers UNet keys to the original stable-diffusion layout.'''
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
        hf_down_prefix = f'''encoder.down_blocks.{i}.resnets.{j}.'''
        sd_down_prefix = f'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
        hf_downsample_prefix = f'''down_blocks.{i}.downsamplers.0.'''
        sd_downsample_prefix = f'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
        hf_upsample_prefix = f'''up_blocks.{i}.upsamplers.0.'''
        sd_upsample_prefix = f'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
        hf_up_prefix = f'''decoder.up_blocks.{i}.resnets.{j}.'''
        sd_up_prefix = f'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f'''mid_block.resnets.{i}.'''
    sd_mid_res_prefix = f'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)
def convert_vae_state_dict(vae_state_dict):
    '''Convert HF Diffusers VAE keys to the original stable-diffusion layout.'''
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if F"""mid.attn_1.{weight_name}.weight""" in k:
                print(F"""Reshaping {k} for SD format""")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_lowerCAmelCase : str = {'''q''': 0, '''k''': 1, '''v''': 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]  # the single letter "q", "k" or "v"
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]  # the single letter "q", "k" or "v"
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
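# A minimal sketch (not part of the original script) of the q/k/v merge above:
# HF CLIP keeps separate q_proj/k_proj/v_proj tensors, while the OpenCLIP text
# encoder used by SD v2 expects them stacked along dim 0 as one in_proj tensor.
def _demo_qkv_merge():
    q, k, v = (torch.randn(4, 4) for _ in range(3))  # hypothetical projections
    in_proj = torch.cat([q, k, v])
    assert in_proj.shape == (12, 4)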
def convert_text_enc_state_dict(text_enc_dict):
    # SD v1 uses the HF CLIP layout directly, so no key surgery is needed
    return text_enc_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
| 46
|
"""Flip images and their YOLO-format bounding-box annotations."""
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index+1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir, img_dir):
    """Collect image paths and their box lists from a directory of label files."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list, anno_list, flip_type=1):
    """Flip each image and mirror its box centers along the flipped axis."""
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char=32):
    """Generate a random lowercase-alphanumeric string of the given length."""
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
| 565
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
'configuration_nllb_moe': [
'NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP',
'NllbMoeConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST',
'NllbMoeForConditionalGeneration',
'NllbMoeModel',
'NllbMoePreTrainedModel',
'NllbMoeTop2Router',
'NllbMoeSparseMLP',
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
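
# A minimal sketch (not part of transformers) of the idea behind _LazyModule:
# submodules are imported only when one of their attributes is first accessed.
# The class and structure below are hypothetical illustrations.
import importlib


class LazyNamespace:
    """Hypothetical stand-in for _LazyModule; maps attribute names to modules."""

    def __init__(self, import_structure):
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        try:
            module = importlib.import_module(self._attr_to_module[name])
        except KeyError:
            raise AttributeError(name) from None
        return getattr(module, name)


# Usage: the json module is only imported once `ns.loads` is touched.
ns = LazyNamespace({"json": ["loads", "dumps"]})
assert ns.loads('{"a": 1}') == {"a": 1}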
| 714
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowercase ( __UpperCAmelCase ):
lowercase_ = ['image_processor', 'tokenizer']
lowercase_ = 'BlipImageProcessor'
lowercase_ = 'AutoTokenizer'
def __init__( self , UpperCAmelCase_ , UpperCAmelCase_ ) -> Dict:
lowerCamelCase : Tuple = False
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCamelCase : List[Any] = self.image_processor
def __call__( self , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = True , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = 0 , UpperCAmelCase_ = None , UpperCAmelCase_ = None , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = False , UpperCAmelCase_ = True , UpperCAmelCase_ = None , **UpperCAmelCase_ , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
lowerCamelCase : str = self.tokenizer
lowerCamelCase : List[str] = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
return text_encoding
# add pixel_values
lowerCamelCase : Any = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
if text is not None:
lowerCamelCase : List[Any] = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
else:
lowerCamelCase : str = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase_ )
return encoding_image_processor
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> str:
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> List[str]:
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : List[Any] = self.tokenizer.model_input_names
lowerCamelCase : Optional[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
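
# A hedged usage sketch (not executed here; the checkpoint name is one
# published BLIP-2 repo): when both images and text are given, the text
# encoding is merged into the image encoding, so the returned BatchFeature
# exposes input_ids, attention_mask and pixel_values together.
#
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="a photo of", return_tensors="pt")
#   sorted(inputs.keys())  # ['attention_mask', 'input_ids', 'pixel_values']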
| 133
| 0
|
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
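
# A quick arithmetic check (not part of the original tests): with the tester's
# defaults (image_size=30, patch_size=2) a ViT sees (30 // 2) ** 2 = 225
# patches, so sequences are 226 tokens long once the [CLS] token is added.
assert (30 // 2) ** 2 + 1 == 226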
| 315
|
from __future__ import annotations


def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Walk the state space tree depth-first, appending one unused element per
    level and printing each full-length permutation at the leaves."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
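
# A quick cross-check (not part of the original): DFS backtracking over n
# elements visits exactly n! leaves, the same count itertools.permutations gives.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 4 * 3 * 2 * 1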
| 315
| 1
|
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL

logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    """Video-frame image processor (the original class name was elided in the
    source; the layout matches VideoMAE's processor)."""

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BILINEAR, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(self, image, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, data_format=ChannelDimension.FIRST):
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(self, videos, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 102
|
def get_min_or_max(min_val=10, max_val=1_000, option=True):
    """Return min_val when option is truthy, otherwise max_val.
    (The original helper's name was elided in the source.)"""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1, number_2):
    """Return the integer midpoint of two numbers."""
    return int((number_1 + number_2) / 2)


def guess_the_number(lower, higher, to_guess):
    """Narrow in on to_guess by repeated bisection between lower and higher."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")
    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)
        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main():
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
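
# A quick illustration (not part of the original): bisection halves the
# interval each round, so finding 17 in (0, 1000) takes nine midpoint probes.
def _demo_bisection_trace():
    lower, higher, target, probes = 0, 1_000, 17, []
    while True:
        mid = get_avg(lower, higher)
        probes.append(mid)
        if mid < target:
            lower = mid
        elif mid > target:
            higher = mid
        else:
            break
    assert probes == [500, 250, 125, 62, 31, 15, 23, 19, 17]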
if __name__ == "__main__":
main()
| 102
| 1
|
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/xprophetnet-large-wiki100-cased": (
            "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer"
        ),
    }
}

PRETRAINED_INIT_CONFIGURATION = {
    "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/xprophetnet-large-wiki100-cased": 512,
}


def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab


class XLMProphetNetTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="[SEP]", eos_token="[SEP]", sep_token="[SEP]", unk_token="[UNK]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # put special tokens and [unused] tokens into the vocab
        self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4}

        for i in range(10):
            tok = f"[unused{i}]"
            self.fairseq_tokens_to_ids[tok] = 5 + i

        # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning(
                "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"
                " pip install sentencepiece"
            )
            raise

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0]
        return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings of sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.sep_token_id]
        sep = [self.sep_token_id]
        return token_ids_0 + sep + token_ids_1 + sep
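
# A minimal sketch (not part of the original file) of the id arithmetic above:
# 5 special tokens (ids 0-4) plus 10 [unused] slots (ids 5-14) precede the
# first real fairseq token, which sits at id 15 in fairseq but id 3 in the spm
# vocab, hence the constant offset of 12 applied in the converters.
def _demo_fairseq_offset():
    first_real_fairseq_id = 5 + 10
    first_real_spm_id = 3
    assert first_real_fairseq_id - first_real_spm_id == 12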
| 592
|
def solution(limit=28123):
    """Project Euler 23: sum of all positive integers that cannot be written
    as the sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
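
# A quick sanity check (not part of the original): 12 is the smallest abundant
# number, since its proper divisors 1+2+3+4+6 = 16 exceed 12, so 24 = 12 + 12
# is the smallest number expressible as a sum of two abundant numbers.
def _demo_smallest_abundant():
    divisor_sum = lambda n: sum(d for d in range(1, n) if n % d == 0)
    assert divisor_sum(12) == 16
    assert all(divisor_sum(n) <= n for n in range(1, 12))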
if __name__ == "__main__":
print(solution())
| 592
| 1
|
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit=1_000_000, n_limit=10):
    """Project Euler 174: count tile totals t <= t_limit that form between 1
    and n_limit distinct hollow square laminae."""
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width * outer_width - t_limit)), 1)
        else:
            hole_width_lower_bound = 1

        # hole width and outer width must share the same parity
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
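
# A small worked case (not part of the original): a 3x3 square with a 1x1 hole
# is the smallest lamina and uses 3*3 - 1*1 = 8 tiles; the next width-1 lamina,
# 5x5 around a 3x3 hole, uses 25 - 9 = 16 tiles.
def _demo_lamina_tiles():
    assert 3 * 3 - 1 * 1 == 8
    assert 5 * 5 - 3 * 3 == 16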
if __name__ == "__main__":
print(F'''{solution() = }''')
| 702
|
import torch
from transformers import AutoModel


class FSNERModel(torch.nn.Module):
    """Few-shot NER model: scores query tokens against support-span boundary tokens."""

    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super().__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
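
# A minimal shape sketch (not part of the original) of the scoring step above:
# multiplying a query token matrix [L, H] by transposed boundary embeddings
# [K, H] gives [L, K]; summing over the K supports and softmaxing over the
# query length yields one boundary probability per query token. Sizes below
# are hypothetical.
def _demo_boundary_scores():
    L, K, H = 7, 3, 16
    q_i = torch.randn(L, H)
    s_start = torch.randn(K, H)
    p_start = torch.matmul(q_i, s_start.T).sum(1).softmax(0)
    assert p_start.shape == (L,) and abs(float(p_start.sum()) - 1.0) < 1e-5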
| 181
| 0
|
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> tuple[complex, complex]:
if a == 0:
raise ValueError('''Coefficient \'a\' must not be zero.''' )
__SCREAMING_SNAKE_CASE = b * b - 4 * a * c
__SCREAMING_SNAKE_CASE = (-b + sqrt(lowercase_ )) / (2 * a)
__SCREAMING_SNAKE_CASE = (-b - sqrt(lowercase_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> List[str]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = quadratic_roots(a=5 , b=6 , c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
| 482
|
import copy
import tempfile
import unittest

from transformers import M2M100Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import M2M100ForConditionalGeneration, M2M100Model, M2M100Tokenizer
    from transformers.models.m2m_100.modeling_m2m_100 import M2M100Decoder, M2M100Encoder


def prepare_m2m_100_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
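
# A minimal sketch (not part of the original tests) of the masking rule used
# above: ne(pad_token_id) marks real tokens with True and padding with False.
def _demo_attention_mask():
    pad_token_id = 1  # hypothetical pad id
    input_ids = torch.tensor([[5, 7, 1, 1]])
    assert input_ids.ne(pad_token_id).tolist() == [[True, True, False, False]]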
class M2M100ModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, encoder_layerdrop=0.0, decoder_layerdrop=0.0, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_m2m_100_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return M2M100Config(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, encoder_layerdrop=self.encoder_layerdrop, decoder_layerdrop=self.decoder_layerdrop, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = M2M100Model(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = M2M100Model(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = M2M100Encoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = M2M100Decoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class M2M100ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            M2M100Model,
            M2M100ForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (M2M100ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": M2M100ForConditionalGeneration,
            "feature-extraction": M2M100Model,
            "summarization": M2M100ForConditionalGeneration,
            "text2text-generation": M2M100ForConditionalGeneration,
            "translation": M2M100ForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = M2M100ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=M2M100Config)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (M2M100Model, M2M100ForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = M2M100ForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4


@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class M2M100ModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = M2M100Model.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_m2m_100_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=5, forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 606
| 0
|
"""simple docstring"""
import qiskit
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Optional[Any] = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
snake_case_ :str = qiskit.QuantumCircuit(lowercase_, lowercase_ )
# Map the quantum measurement to the classical bits
circuit.measure([0], [0] )
# Execute the circuit on the simulator
snake_case_ :str = qiskit.execute(lowercase_, lowercase_, shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase_ )
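
# Expected behaviour (a hedged note, not part of the original): the qubit
# starts in |0> and no gates are applied before measurement, so every shot
# lands on "0" and the returned histogram should be simply:
# single_qubit_measure(1, 1)  # -> {"0": 1000}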
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 707
|
"""simple docstring"""
from __future__ import annotations
__a = list[tuple[int, int]]
__a = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__a = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class lowerCamelCase :
'''simple docstring'''
def __init__( self: int , snake_case: int , snake_case: int , snake_case: int , snake_case: int , snake_case: float , snake_case: Node | None , ) -> Any:
snake_case_ :Tuple = pos_x
snake_case_ :Optional[Any] = pos_y
snake_case_ :List[Any] = (pos_y, pos_x)
snake_case_ :int = goal_x
snake_case_ :Optional[Any] = goal_y
snake_case_ :str = g_cost
snake_case_ :Tuple = parent
snake_case_ :Tuple = self.calculate_heuristic()
def lowerCAmelCase_ ( self: Any ) -> float:
snake_case_ :List[Any] = abs(self.pos_x - self.goal_x )
snake_case_ :Any = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self: Any , snake_case: Optional[Any] ) -> bool:
return self.f_cost < other.f_cost
class GreedyBestFirst:
    """Greedy best-first search: always expand the open node with the smallest heuristic."""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> Path | None:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, unblocked neighbours of a node."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]

            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    # pass the goal as (x, y) to match Node's signature
                    self.target.pos_x,
                    self.target.pos_y,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        """Retrace the path from the given node back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    print("------")

    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        # Path positions are (y, x) tuples; mark the found route on the grid.
        for pos_y, pos_x in path:
            grid[pos_y][pos_x] = 2

        for elem in grid:
            print(elem)
| 310
| 0
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket" )
@patch("builtins.open" )
def test_send_file_running_as_expected(file: Mock, sock: Mock) -> None:
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== verification =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 440
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
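# Optional-dependency guard: import the real pipelines only when torch and
# transformers are installed; otherwise fall back to dummy placeholder objects
# that raise an informative error when used.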
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 440
| 1
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
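# Guarded import: UnCLIP needs torch plus transformers >= 4.25.0; otherwise the
# dummy objects stand in and raise an informative error on use.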
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 423
|
def greatest_common_divisor(x: int, y: int) -> int:
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Project Euler 5: smallest positive number evenly divisible by all numbers from 1 to n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g


if __name__ == "__main__":
    print(f"{solution() = }")
| 423
| 1
|